From 6647bf7eaeb915e2d09db8b5c7584ee286962d3b Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Tue, 5 Aug 2008 09:20:07 +0000 Subject: Merge from documentation editor. --- numpy/lib/_datasource.py | 5 +- numpy/lib/arraysetops.py | 115 +-- numpy/lib/financial.py | 189 ++++- numpy/lib/format.py | 103 ++- numpy/lib/function_base.py | 1679 ++++++++++++++++++++++++++++++++++++-------- numpy/lib/getlimits.py | 92 ++- numpy/lib/index_tricks.py | 53 +- numpy/lib/io.py | 225 +++--- numpy/lib/machar.py | 82 ++- numpy/lib/polynomial.py | 540 ++++++++++---- numpy/lib/shape_base.py | 1000 ++++++++++++++++++-------- numpy/lib/stride_tricks.py | 6 +- numpy/lib/twodim_base.py | 447 ++++++++++-- numpy/lib/type_check.py | 160 ++++- numpy/lib/ufunclike.py | 77 +- numpy/lib/utils.py | 128 +++- 16 files changed, 3857 insertions(+), 1044 deletions(-) (limited to 'numpy/lib') diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index e026f6816..1201f3d7e 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -402,12 +402,13 @@ class DataSource (object): class Repository (DataSource): - """A data Repository where multiple DataSource's share a base URL/directory. + """ + A data Repository where multiple DataSource's share a base URL/directory. Repository extends DataSource by prepending a base URL (or directory) to all the files it handles. Use a Repository when you will be working with multiple files from one base URL. Initialize the Respository with the - base URL, then refer to each file by it's filename only. + base URL, then refer to each file by its filename only. *Methods*: diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index 8fec23cd3..8bd76d17f 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -39,8 +39,8 @@ import time import numpy as np def ediff1d(ary, to_end=None, to_begin=None): - """The differences between consecutive elements of an array, possibly with - prefixed and/or appended values. + """ + The differences between consecutive elements of an array. Parameters ---------- @@ -75,31 +75,38 @@ def ediff1d(ary, to_end=None, to_begin=None): return ed def unique1d(ar1, return_index=False): - """Find the unique elements of 1D array. - - Most of the other array set operations operate on the unique arrays - generated by this function. + """ + Find the unique elements of an array. Parameters ---------- - ar1 : array - This array will be flattened if it is not already 1D. + ar1 : array-like + This array will be flattened if it is not already 1-D. return_index : bool, optional - If True, also return the indices against ar1 that result in the unique - array. + If True, also return the indices against `ar1` that result in the + unique array. Returns ------- - unique : array + unique : ndarray The unique values. - unique_indices : int array, optional - The indices of the unique values. Only provided if return_index is True. + unique_indices : ndarray, optional + The indices of the unique values. Only provided if `return_index` is + True. See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. + Examples + -------- + >>> np.unique1d([1, 1, 2, 2, 3, 3]) + array([1, 2, 3]) + >>> a = np.array([[1, 1], [2, 3]]) + >>> np.unique1d(a) + array([1, 2, 3]) + """ ar = np.asarray(ar1).flatten() if ar.size == 0: @@ -118,50 +125,60 @@ def unique1d(ar1, return_index=False): return ar[flag] def intersect1d(ar1, ar2): - """Intersection of 1D arrays with unique elements. 
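A possible doctest for `ediff1d` further up in this hunk, which the patch leaves without an Examples section (illustrative sketch only, using the top-level `np.ediff1d` alias in the same way the other doctests in this module do):

    >>> np.ediff1d([1, 2, 4, 7])
    array([1, 2, 3])
    >>> np.ediff1d([1, 2, 4, 7], to_begin=0, to_end=0)
    array([0, 1, 2, 3, 0])
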
- - Use unique1d() to generate arrays with only unique elements to use as inputs - to this function. Alternatively, use intersect1d_nu() which will find the - unique values for you. + """ + Intersection returning repeated or unique elements common to both arrays. Parameters ---------- - ar1 : array - ar2 : array + ar1,ar2 : array_like + Input arrays. Returns ------- - intersection : array + out : ndarray, shape(N,) + Sorted 1D array of common elements with repeating elements. See Also -------- + intersect1d_nu : Returns only unique common elements. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. + Examples + -------- + >>> np.intersect1d([1,3,3],[3,1,1]) + array([1, 1, 3, 3]) + """ aux = np.concatenate((ar1,ar2)) aux.sort() return aux[aux[1:] == aux[:-1]] def intersect1d_nu(ar1, ar2): - """Intersection of 1D arrays with any elements. - - The input arrays do not have unique elements like intersect1d() requires. + """ + Intersection returning unique elements common to both arrays. Parameters ---------- - ar1 : array - ar2 : array + ar1,ar2 : array_like + Input arrays. Returns ------- - intersection : array + out : ndarray, shape(N,) + Sorted 1D array of common and unique elements. See Also -------- + intersect1d : Returns repeated or unique common elements. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. + Examples + -------- + >>> np.intersect1d_nu([1,3,3],[3,1,1]) + array([1, 3]) + """ # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? aux = np.concatenate((unique1d(ar1), unique1d(ar2))) @@ -169,15 +186,18 @@ def intersect1d_nu(ar1, ar2): return aux[aux[1:] == aux[:-1]] def setxor1d(ar1, ar2): - """Set exclusive-or of 1D arrays with unique elements. + """ + Set exclusive-or of 1D arrays with unique elements. - Use unique1d() to generate arrays with only unique elements to use as inputs - to this function. + Use unique1d() to generate arrays with only unique elements to use as + inputs to this function. Parameters ---------- ar1 : array + Input array. ar2 : array + Input array. Returns ------- @@ -202,20 +222,25 @@ def setxor1d(ar1, ar2): return aux[flag2] def setmember1d(ar1, ar2): - """Return a boolean array of shape of ar1 containing True where the elements - of ar1 are in ar2 and False otherwise. + """ + Return a boolean array set True where first element is in second array. + + Boolean array is the shape of `ar1` containing True where the elements + of `ar1` are in `ar2` and False otherwise. - Use unique1d() to generate arrays with only unique elements to use as inputs - to this function. + Use unique1d() to generate arrays with only unique elements to use as + inputs to this function. Parameters ---------- ar1 : array + Input array. ar2 : array + Input array. Returns ------- - mask : bool array + mask : bool-array The values ar1[mask] are in ar2. See Also @@ -252,17 +277,20 @@ def union1d(ar1, ar2): """ Union of 1D arrays with unique elements. - Use unique1d() to generate arrays with only unique elements to use as inputs - to this function. + Use unique1d() to generate arrays with only unique elements to use as + inputs to this function. Parameters ---------- - ar1 : array - ar2 : array + ar1 : array_like, shape(M,) + Input array. + ar2 : array_like, shape(N,) + Input array. Returns ------- union : array + Unique union of input arrays. 
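Neither `setmember1d` nor `union1d` gains an Examples section in this patch; a sketch of what such doctests might look like (illustrative only, both inputs already unique as the docstrings require):

    >>> np.setmember1d([0, 2, 4], [1, 2, 3])
    array([False,  True, False], dtype=bool)
    >>> np.union1d([1, 3, 5], [2, 3, 4])
    array([1, 2, 3, 4, 5])
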
See also -------- @@ -273,15 +301,18 @@ def union1d(ar1, ar2): return unique1d( np.concatenate( (ar1, ar2) ) ) def setdiff1d(ar1, ar2): - """Set difference of 1D arrays with unique elements. + """ + Set difference of 1D arrays with unique elements. - Use unique1d() to generate arrays with only unique elements to use as inputs - to this function. + Use unique1d() to generate arrays with only unique elements to use as + inputs to this function. Parameters ---------- ar1 : array + Input array. ar2 : array + Input comparison array. Returns ------- diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index 9c5d2753a..a997cf6c9 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -52,7 +52,45 @@ def _convert_when(when): def fv(rate, nper, pmt, pv, when='end'): - """future value computed by solving the equation + """ + Compute the future value. + + Parameters + ---------- + rate : array-like + Rate of interest (per period) + nper : array-like + Number of compounding periods + pmt : array-like + Payment + pv : array-like + Present value + when : array-like + When payments are due ('begin' (1) or 'end' (0)) + + Notes + ----- + The future value is computed by solving the equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + Examples + -------- + What is the future value after 10 years of saving $100 now, with + an additional monthly savings of $100. Assume the interest rate is + 5% (annually) compounded monthly? + + >>> np.fv(0.05/12, 10*12, -100, -100) + 15692.928894335748 + + By convention, the negative sign represents cash flow out (i.e. money not + available today). Thus, saving $100 a month at 5% annual interest leads + to $15,692.93 available to spend in 10 years. + """ when = _convert_when(when) rate, nper, pmt, pv, when = map(np.asarray, [rate, nper, pmt, pv, when]) @@ -78,7 +116,43 @@ By convention, the negative sign represents cash flow out (i.e. money not """ def pmt(rate, nper, pv, fv=0, when='end'): - """Payment computed by solving the equation + """ + Compute the payment. + + Parameters + ---------- + rate : array-like + Rate of interest (per period) + nper : array-like + Number of compounding periods + pv : array-like + Present value + fv : array-like + Future value + when : array-like + When payments are due ('begin' (1) or 'end' (0)) + + Notes + ----- + The payment ``pmt`` is computed by solving the equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + Examples + -------- + What would the monthly payment need to be to pay off a $200,000 loan in 15 + years at an annual interest rate of 7.5%? + + >>> np.pmt(0.075/12, 12*15, 200000) + -1854.0247200054619 + + In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained + today, a monthly payment of $1,854.02 would be required. + """ when = _convert_when(when) rate, nper, pv, fv, when = map(np.asarray, [rate, nper, pv, fv, when]) @@ -102,7 +176,52 @@ In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained """ def nper(rate, pmt, pv, fv=0, when='end'): - """Number of periods found by solving the equation + """ + Compute the number of periods. 
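The defining equation quoted in the `fv` and `pmt` docstrings above can be checked numerically; a doctest along these lines (illustrative only; `when='end'` corresponds to `when == 0`) makes the sign convention concrete:

    >>> rate, nper, pmt, pv, when = 0.05/12, 10*12, -100, -100, 0
    >>> end_value = np.fv(rate, nper, pmt, pv)
    >>> np.allclose(end_value + pv*(1 + rate)**nper
    ...             + pmt*(1 + rate*when)/rate * ((1 + rate)**nper - 1), 0)
    True
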
+ + Parameters + ---------- + rate : array_like + Rate of interest (per period) + pmt : array_like + Payment + pv : array_like + Present value + fv : array_like + Future value + when : array_like + When payments are due ('begin' (1) or 'end' (0)) + + Notes + ----- + The number of periods ``nper`` is computed by solving the equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + Examples + -------- + If you only had $150 to spend as payment, how long would it take to pay-off + a loan of $8,000 at 7% annual interest? + + >>> np.nper(0.07/12, -150, 8000) + 64.073348770661852 + + So, over 64 months would be required to pay off the loan. + + The same analysis could be done with several different interest rates and/or + payments and/or total amounts to produce an entire table. + + >>> np.nper(*(np.ogrid[0.06/12:0.071/12:0.01/12, -200:-99:100, 6000:7001:1000])) + array([[[ 32.58497782, 38.57048452], + [ 71.51317802, 86.37179563]], + + [[ 33.07413144, 39.26244268], + [ 74.06368256, 90.22989997]]]) + """ when = _convert_when(when) rate, pmt, pv, fv, when = map(np.asarray, [rate, pmt, pv, fv, when]) @@ -139,6 +258,10 @@ array([[[ 32.58497782, 38.57048452], """ def ipmt(rate, per, nper, pv, fv=0.0, when='end'): + """ + Not implemented. + + """ total = pmt(rate, nper, pv, fv, when) # Now, compute the nth step in the amortization raise NotImplementedError @@ -148,7 +271,32 @@ def ppmt(rate, per, nper, pv, fv=0.0, when='end'): return total - ipmt(rate, per, nper, pv, fv, when) def pv(rate, nper, pmt, fv=0.0, when='end'): - """Number of periods found by solving the equation + """ + Compute the present value. + + Parameters + ---------- + rate : array-like + Rate of interest (per period) + nper : array-like + Number of compounding periods + pmt : array-like + Payment + fv : array-like + Future value + when : array-like + When payments are due ('begin' (1) or 'end' (0)) + + Notes + ----- + The present value ``pv`` is computed by solving the equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 + + or, when ``rate = 0``:: + + fv + pv + pmt * nper = 0 + """ when = _convert_when(when) rate, nper, pmt, fv, when = map(np.asarray, [rate, nper, pmt, fv, when]) @@ -175,7 +323,38 @@ def _g_div_gp(r, n, p, x, y, w): # g(r) is the formula # g'(r) is the derivative with respect to r. def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100): - """Number of periods found by solving the equation + """ + Compute the rate of interest per period. 
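`pv` above states the same defining equation but is merged without an example; a round-trip doctest showing that `pv` and `fv` are mutually consistent could look like this (illustrative only):

    >>> rate, nper, pmt = 0.05/12, 10*12, -100
    >>> end_value = np.fv(rate, nper, pmt, -100)
    >>> np.allclose(np.pv(rate, nper, pmt, end_value), -100)
    True
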
+ + Parameters + ---------- + nper : array_like + Number of compounding periods + pmt : array_like + Payment + pv : array_like + Present value + fv : array_like + Future value + when : array_like, optional + When payments are due ('begin' (1) or 'end' (0)) + guess : float, optional + Starting guess for solving the rate of interest + tol : float, optional + Required tolerance for the solution + maxiter : int, optional + Maximum iterations in finding the solution + + Notes + ----- + The rate of interest ``rate`` is computed by solving the equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 + + or, if ``rate = 0``:: + + fv + pv + pmt * nper = 0 + """ when = _convert_when(when) nper, pmt, pv, fv, when = map(np.asarray, [nper, pmt, pv, fv, when]) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 00281bf9b..4192e1225 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -1,5 +1,4 @@ -"""Define a simple format for saving numpy arrays to disk. - +""" Define a simple format for saving numpy arrays to disk with the full information about them. @@ -14,13 +13,13 @@ restored by using the `loadedarray.view(correct_dtype)` method. Format Version 1.0 ------------------ -The first 6 bytes are a magic string: exactly "\\x93NUMPY". +The first 6 bytes are a magic string: exactly "\\\\x93NUMPY". The next 1 byte is an unsigned byte: the major version number of the file -format, e.g. \\x01. +format, e.g. \\\\x01. The next 1 byte is an unsigned byte: the minor version number of the file -format, e.g. \\x00. Note: the version of the file format is not tied to the +format, e.g. \\\\x00. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the @@ -28,8 +27,8 @@ header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a -dictionary. It is terminated by a newline ('\\n') and padded with spaces -('\\x20') to make the total length of the magic string + 4 + HEADER_LEN be +dictionary. It is terminated by a newline ('\\\\n') and padded with spaces +('\\\\x20') to make the total length of the magic string + 4 + HEADER_LEN be evenly divisible by 16 for alignment purposes. The dictionary contains three keys: @@ -46,7 +45,7 @@ The dictionary contains three keys: For repeatability and readability, the dictionary keys are sorted in alphabetic order. This is for convenience only. A writer SHOULD implement this if -possible. A reader MUST NOT depend on this. +possible. A reader MUST NOT depend on this. Following the header comes the array data. If the dtype contains Python objects (i.e. dtype.hasobject is True), then the data is a Python pickle of the array. @@ -112,13 +111,26 @@ def read_magic(fp): return major, minor def dtype_to_descr(dtype): - """ Get a serializable descriptor from the dtype. + """ + Get a serializable descriptor from the dtype. - The .descr attribute of a dtype object cannot be round-tripped through the - dtype() constructor. Simple types, like dtype('float32'), have a descr - which looks like a record array with one field with '' as a name. The - dtype() constructor interprets this as a request to give a default name. - Instead, we construct descriptor that can be passed to dtype(). + The .descr attribute of a dtype object cannot be round-tripped through + the dtype() constructor. 
Simple types, like dtype('float32'), have + a descr which looks like a record array with one field with '' as + a name. The dtype() constructor interprets this as a request to give + a default name. Instead, we construct descriptor that can be passed to + dtype(). + + Parameters + ---------- + dtype : dtype + The dtype of the array that will be written to disk. + + Returns + ------- + descr : object + An object that can be passed to `numpy.dtype()` in order to + replicate the input dtype. """ if dtype.names is not None: @@ -188,7 +200,8 @@ def write_array_header_1_0(fp, d): fp.write(header) def read_array_header_1_0(fp): - """ Read an array header from a filelike object using the 1.0 file format + """ + Read an array header from a filelike object using the 1.0 file format version. This will leave the file object located just after the header. @@ -196,6 +209,7 @@ def read_array_header_1_0(fp): Parameters ---------- fp : filelike object + A file object or something with a `.read()` method like a file. Returns ------- @@ -206,10 +220,13 @@ def read_array_header_1_0(fp): or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype + The dtype of the file's data. Raises ------ - ValueError if the data is invalid. + ValueError : + If the data is invalid. + """ # Read an unsigned, little-endian short int which has the length of the # header. @@ -259,23 +276,31 @@ def read_array_header_1_0(fp): return d['shape'], d['fortran_order'], dtype def write_array(fp, array, version=(1,0)): - """ Write an array to a file, including a header. + """ + Write an array to an NPY file, including a header. If the array is neither C-contiguous or Fortran-contiguous AND if the - filelike object is not a real file object, then this function will have to - copy data in memory. + filelike object is not a real file object, then this function will have + to copy data in memory. Parameters ---------- fp : filelike object + An open, writable file object or similar object with a `.write()` + method. array : numpy.ndarray + The array to write to disk. version : (int, int), optional The version number of the format. Raises ------ - ValueError if the array cannot be persisted. Various other errors from - pickling if the array contains Python objects as part of its dtype. + ValueError + If the array cannot be persisted. + Various other errors + If the array contains Python objects as part of its dtype, the + process of pickling them may raise arbitrary errors if the objects + are not picklable. """ if version != (1, 0): @@ -300,7 +325,8 @@ def write_array(fp, array, version=(1,0)): fp.write(array.tostring('C')) def read_array(fp): - """ Read an array from a file. + """ + Read an array from an NPY file. Parameters ---------- @@ -311,10 +337,12 @@ def read_array(fp): Returns ------- array : numpy.ndarray + The array from the data on disk. Raises ------ - ValueError if the data is invalid. + ValueError + If the data is invalid. """ version = read_magic(fp) @@ -353,19 +381,28 @@ def read_array(fp): def open_memmap(filename, mode='r+', dtype=None, shape=None, fortran_order=False, version=(1,0)): - """ Open a .npy file as a memory-mapped array. + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. Parameters ---------- filename : str + The name of the file on disk. This may not be a filelike object. mode : str, optional The mode to open the file with. 
In addition to the standard file modes, - 'c' is also accepted to mean "copy on write". + 'c' is also accepted to mean "copy on write". See `numpy.memmap` for + the available mode strings. dtype : dtype, optional + The data type of the array if we are creating a new file in "write" + mode. shape : tuple of int, optional + The shape of the array if we are creating a new file in "write" + mode. fortran_order : bool, optional - If the mode is a "write" mode, then the file will be created using this - dtype, shape, and contiguity. + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False) if we are creating a new file in "write" mode. version : tuple of int (major, minor) If the mode is a "write" mode, then this is the version of the file format used to create the file. @@ -373,11 +410,19 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None, Returns ------- marray : numpy.memmap + The memory-mapped array. Raises ------ - ValueError if the data or the mode is invalid. - IOError if the file is not found or cannot be opened correctly. + ValueError + If the data or the mode is invalid. + IOError + If the file is not found or cannot be opened correctly. + + See Also + -------- + numpy.memmap + """ if 'w' in mode: # We are creating the file, not reading it. diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index f0b941872..3a0212a0e 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -33,42 +33,68 @@ import numpy as np #end Fernando's utilities def linspace(start, stop, num=50, endpoint=True, retstep=False): - """Return evenly spaced numbers. + """ + Return evenly spaced numbers. - Return num evenly spaced samples from start to stop. If - endpoint is True, the last sample is stop. If retstep is - True then return (seq, step_value), where step_value used. + `linspace` returns `num` evenly spaced samples, calculated over the + interval ``[start, stop]``. The endpoint of the interval can optionally + be excluded. Parameters ---------- - start : {float} - The value the sequence starts at. - stop : {float} - The value the sequence stops at. If ``endpoint`` is false, then - this is not included in the sequence. Otherwise it is - guaranteed to be the last value. - num : {integer} + start : float + The starting value of the sequence. + stop : float + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int Number of samples to generate. Default is 50. - endpoint : {boolean} - If true, ``stop`` is the last sample. Otherwise, it is not - included. Default is true. - retstep : {boolean} - If true, return ``(samples, step)``, where ``step`` is the - spacing used in generating the samples. + endpoint : bool + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool + If True, return (`samples`, `step`), where `step` is the spacing + between samples. Returns ------- - samples : {array} - ``num`` equally spaced samples from the range [start, stop] - or [start, stop). - step : {float} (Only if ``retstep`` is true) + samples : ndarray + `num` equally spaced samples in the closed interval + ``[start, stop]`` or the half-open interval ``[start, stop)`` + (depending on whether `endpoint` is True or False). + step : float (only if `retstep` is True) Size of spacing between samples. 
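Stepping back to the `numpy.lib.format` routines earlier in this patch: `write_array` and `read_array` are documented above without a usage example. A round trip might look like the following sketch (the file name is only a placeholder):

    >>> from numpy.lib import format
    >>> fp = open('example.npy', 'wb')      # placeholder path
    >>> format.write_array(fp, np.arange(10))
    >>> fp.close()
    >>> format.read_array(open('example.npy', 'rb'))
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
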
+ See Also -------- - arange : Similiar to linspace, however, when used with - a float endpoint, that endpoint may or may not be included. - logspace + arange : Similiar to `linspace`, but uses a step size (instead of the + number of samples). Note that, when used with a float + endpoint, the endpoint may or may not be included. + logspace : Samples uniformly distributed in log space. + + Examples + -------- + >>> np.linspace(2.0, 3.0, num=5) + array([ 2. , 2.25, 2.5 , 2.75, 3. ]) + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([ 2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 8 + >>> y = np.zeros(N) + >>> x1 = np.linspace(0, 10, N, endpoint=True) + >>> x2 = np.linspace(0, 10, N, endpoint=False) + >>> plt.plot(x1, y, 'o') + >>> plt.plot(x2, y + 0.5, 'o') + >>> plt.ylim([-0.5, 1]) + >>> plt.show() """ num = int(num) @@ -89,10 +115,73 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False): return y def logspace(start,stop,num=50,endpoint=True,base=10.0): - """Evenly spaced numbers on a logarithmic scale. + """ + Return numbers spaced evenly on a log scale. + + In linear space, the sequence starts at ``base ** start`` + (`base` to the power of `start`) and ends with ``base ** stop`` + (see `endpoint` below). + + Parameters + ---------- + start : float + ``base ** start`` is the starting value of the sequence. + stop : float + ``base ** stop`` is the final value of the sequence, unless `endpoint` + is False. In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length ``num``) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + base : float, optional + The base of the log space. The step size between the elements in + ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. + Default is 10.0. + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + arange : Similiar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + + Notes + ----- + Logspace is equivalent to the code + + >>> y = linspace(start, stop, num=num, endpoint=endpoint) + >>> power(base, y) + + Examples + -------- + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([ 100. , 177.827941 , 316.22776602, 562.34132519]) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([ 4. , 5.0396842 , 6.34960421, 8. ]) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> x1 = np.logspace(0.1, 1, N, endpoint=True) + >>> x2 = np.logspace(0.1, 1, N, endpoint=False) + >>> y = np.zeros(N) + >>> plt.plot(x1, y, 'o') + >>> plt.plot(x2, y + 0.5, 'o') + >>> plt.ylim([-0.5, 1]) + >>> plt.show() - Computes int(num) evenly spaced exponents from base**start to - base**stop. 
If endpoint=True, then last number is base**stop """ y = linspace(start,stop,num=num,endpoint=endpoint) return _nx.power(base,y) @@ -103,43 +192,40 @@ def iterable(y): return 1 def histogram(a, bins=10, range=None, normed=False, weights=None, new=False): - """Compute the histogram from a set of data. + """ + Compute the histogram of a set of data. Parameters ---------- - a : array - The data to histogram. - - bins : int or sequence - If an int, then the number of equal-width bins in the given - range. If new=True, bins can also be the bin edges, allowing - for non-constant bin widths. - - range : (float, float) - The lower and upper range of the bins. If not provided, range - is simply (a.min(), a.max()). Using new=False, lower than - range are ignored, and values higher than range are tallied in - the rightmost bin. Using new=True, both lower and upper - outliers are ignored. - - normed : bool - If False, the result array will contain the number of samples - in each bin. If True, the result array is the value of the - probability *density* function at the bin normalized such that - the *integral* over the range is 1. Note that the sum of all - of the histogram values will not usually be 1; it is not a + a : array_like + Input data. + bins : int or sequence of scalars, optional + If `bins` is an int, it gives the number of equal-width bins in the + given range (10, by default). If `new` is True, bins can also be + the bin edges, allowing for non-uniform bin widths. + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. With `new` set to True, values + outside the range are ignored. With `new` set to False, values + below the range are ignored, and those above the range are tallied + in the rightmost bin. + normed : bool, optional + If False, the result will contain the number of samples + in each bin. If True, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will often not be equal to 1; it is not a probability *mass* function. - - weights : array - An array of weights, the same shape as a. If normed is False, - the histogram is computed by summing the weights of the values - falling into each bin. If normed is True, the weights are - normalized, so that the integral of the density over the range - is 1. This option is only available with new=True. - - new : bool - Compatibility argument to transition from the old version - (v1.1) to the new version (v1.2). + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in `a` + only contributes its associated weight towards the bin count + (instead of 1). If `normed` is True, the weights are normalized, + so that the integral of the density over the range remains 1. + The `weights` keyword is only available with `new` set to True. + new : bool, optional + Compatibility argument to aid in the transition between the old + (v1.1) and the new (v1.2) implementations. In version 1.2, + `new` will be True by default. Returns ------- @@ -147,14 +233,30 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=False): The values of the histogram. See `normed` and `weights` for a description of the possible semantics. - bin_edges : float array - With new=False, return the left bin edges (length(hist)). - With new=True, return the bin edges (length(hist)+1). 
+ bin_edges : array of dtype float + With ``new = False``, return the left bin edges (``length(hist)``). + With ``new = True``, return the bin edges ``(length(hist)+1)``. See Also -------- histogramdd + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the + second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* + 4. + + Examples + -------- + >>> np.histogram([1,2,1], bins=[0,1,2,3], new=True) + (array([0, 2, 1]), array([0, 1, 2, 3])) + """ # Old behavior if new is False: @@ -280,47 +382,53 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=False): def histogramdd(sample, bins=10, range=None, normed=False, weights=None): - """histogramdd(sample, bins=10, range=None, normed=False, weights=None) - - Return the N-dimensional histogram of the sample. + """ + Compute the multidimensional histogram of some data. Parameters ---------- - sample : sequence or array - A sequence containing N arrays or an NxM array. Input data. - - bins : sequence or scalar - A sequence of edge arrays, a sequence of bin counts, or a scalar - which is the bin count for all dimensions. Default is 10. - - range : sequence - A sequence of lower and upper bin edges. Default is [min, max]. - - normed : boolean - If False, return the number of samples in each bin, if True, - returns the density. - - weights : array - Array of weights. The weights are normed only if normed is True. - Should the sum of the weights not equal N, the total bin count will - not be equal to the number of samples. + sample : array-like + Data to histogram passed as a sequence of D arrays of length N, or + as an (N,D) array. + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the bin edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of lower and upper bin edges to be used if the edges are + not given explicitely in `bins`. Defaults to the minimum and maximum + values along each dimension. + normed : boolean, optional + If False, returns the number of samples in each bin. If True, returns + the bin density, ie, the bin count divided by the bin hypervolume. + weights : array-like (N,), optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if normed is True. If normed is False, the + values of the returned histogram are equal to the sum of the weights + belonging to the samples falling into each bin. Returns ------- - hist : array - Histogram array. - + H : array + The multidimensional histogram of sample x. See normed and weights for + the different possible semantics. edges : list - List of arrays defining the lower bin edges. + A list of D arrays describing the bin edges for each dimension. 
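In addition to the random-data example that follows, the `range` and per-dimension `bins` arguments described above admit a deterministic check; a possible extra doctest (illustrative only):

    >>> x = np.array([[0.2, 0.3], [0.6, 0.7], [0.4, 0.6]])
    >>> H, edges = np.histogramdd(x, bins=(2, 2), range=[(0, 1), (0, 1)])
    >>> H.sum()
    3.0
    >>> [e.size for e in edges]
    [3, 3]
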
See Also -------- - histogram + histogram: 1D histogram + histogram2d: 2D histogram Examples -------- - >>> x = np.random.randn(100,3) - >>> hist3d, edges = np.lib.histogramdd(x, bins = (5, 6, 7)) + >>> r = np.random.randn(100,3) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5,8,4), 6, 9, 5) """ @@ -439,7 +547,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): def average(a, axis=None, weights=None, returned=False): - """Return the weighted average of array a over the given axis. + """ + Return the weighted average of array over the specified axis. Parameters @@ -447,42 +556,52 @@ def average(a, axis=None, weights=None, returned=False): a : array_like Data to be averaged. axis : {None, integer}, optional - Axis along which to average a. If None, averaging is done over the + Axis along which to average `a`. If `None`, averaging is done over the entire array irrespective of its shape. weights : {None, array_like}, optional - The importance each datum has in the computation of the - average. The weights array can either be 1D, in which case its length - must be the size of a along the given axis, or of the same shape as a. - If weights=None, all data are assumed to have weight equal to one. - returned :{False, boolean}, optional - If True, the tuple (average, sum_of_weights) is returned, - otherwise only the average is returmed. Note that if weights=None, then - the sum of the weights is also the number of elements averaged over. + The importance that each datum has in the computation of the average. + The weights array can either be 1D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + returned : {False, boolean}, optional + If `True`, the tuple (`average`, `sum_of_weights`) is returned, + otherwise only the average is returned. Note that if `weights=None`, + `sum_of_weights` is equivalent to the number of elements over which + the average is taken. Returns ------- average, [sum_of_weights] : {array_type, double} - Return the average along the specified axis. When returned is True, + Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is Float if a is - of integer type, otherwise it is of the same type as a. - sum_of_weights is has the same type as the average. - - - Examples - -------- - >>> np.average(range(1,11), weights=range(10,0,-1)) - 4.0 + of the weights as the second element. The return type is `Float` + if `a` is of integer type, otherwise it is of the same type as `a`. + `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError - When all weights along axis are zero. See numpy.ma.average for a + When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError - When the length of 1D weights is not the same as the shape of a + When the length of 1D `weights` is not the same as the shape of `a` along axis. 
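Beyond the examples that follow, the `axis`, 1-D `weights`, and `returned` options described above could be illustrated like this (sketch only):

    >>> data = np.arange(6).reshape((3, 2))
    >>> np.average(data, axis=1, weights=[1./4, 3./4])
    array([ 0.75,  2.75,  4.75])
    >>> np.average(range(1, 11), weights=range(10, 0, -1), returned=True)
    (4.0, 55.0)
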
+ See Also + -------- + ma.average : average for masked arrays + + Examples + -------- + >>> data = range(1,5) + >>> data + [1, 2, 3, 4] + >>> np.average(data) + 2.5 + >>> np.average(range(1,11), weights=range(10,0,-1)) + 4.0 + """ if not isinstance(a, np.matrix) : a = np.asarray(a) @@ -528,37 +647,79 @@ def asarray_chkfinite(a): return a def piecewise(x, condlist, funclist, *args, **kw): - """Return a piecewise-defined function. + """ + Evaluate a piecewise-defined function. + + Given a set of conditions and corresponding functions, evaluate each + function on the input data wherever its condition is true. + + Parameters + ---------- + x : (N,) ndarray + The input domain. + condlist : list of M (N,)-shaped boolean arrays + Each boolean array corresponds to a function in `funclist`. Wherever + `condlist[i]` is True, `funclist[i](x)` is used as the output value. + + Each boolean array in `condlist` selects a piece of `x`, + and should therefore be of the same shape as `x`. + + The length of `condlist` must correspond to that of `funclist`. + If one extra function is given, i.e. if the length of `funclist` is + M+1, then that extra function is the default value, used wherever + all conditions are false. + funclist : list of M or M+1 callables, f(x,*args,**kw), or values + Each function is evaluated over `x` wherever its corresponding + condition is True. It should take an array as input and give an array + or a scalar value as output. If, instead of a callable, + a value is provided then a constant function (``lambda x: value``) is + assumed. + args : tuple, optional + Any further arguments given to `piecewise` are passed to the functions + upon execution, i.e., if called ``piecewise(...,...,1,'a')``, then + each function is called as ``f(x,1,'a')``. + kw : dictionary, optional + Keyword arguments used in calling `piecewise` are passed to the + functions upon execution, i.e., if called + ``piecewise(...,...,lambda=1)``, then each function is called as + ``f(x,lambda=1)``. + + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have undefined values. + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. - x is the domain + The result is:: - condlist is a list of boolean arrays or a single boolean array - The length of the condition list must be n2 or n2-1 where n2 - is the length of the function list. If len(condlist)==n2-1, then - an 'otherwise' condition is formed by |'ing all the conditions - and inverting. + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... + |funclist[n2](x[condlist[n2]]) + |-- - funclist is a list of functions to call of length (n2). - Each function should return an array output for an array input - Each function can take (the same set) of extra arguments and - keyword arguments which are passed in after the function list. - A constant may be used in funclist for a function that returns a - constant (e.g. val and lambda x: val are equivalent in a funclist). + Examples + -------- + Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. - The output is the same shape and type as x and is found by - calling the functions on the appropriate portions of x. 
+ >>> x = np.arange(6) - 2.5 # x runs from -2.5 to 2.5 in steps of 1 + >>> np.piecewise(x, [x < 0, x >= 0.5], [-1,1]) + array([-1., -1., -1., 1., 1., 1.]) - Note: This is similar to choose or select, except - the the functions are only evaluated on elements of x - that satisfy the corresponding condition. + Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for + ``x >= 0``. - The result is - |-- - | f1(x) for condition1 - y = --| f2(x) for condition2 - | ... - | fn(x) for conditionn - |-- + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) """ x = asanyarray(x) @@ -608,25 +769,30 @@ def piecewise(x, condlist, funclist, *args, **kw): return y def select(condlist, choicelist, default=0): - """Return an array composed of different elements in choicelist, - depending on the list of conditions. + """ + Return an array drawn from elements in choicelist, depending on conditions. - :Parameters: - condlist : list of N boolean arrays of length M + Parameters + ---------- + condlist : list of N boolean arrays of length M The conditions C_0 through C_(N-1) which determine from which vector the output elements are taken. - choicelist : list of N arrays of length M + choicelist : list of N arrays of length M Th vectors V_0 through V_(N-1), from which the output elements are chosen. - :Returns: - output : 1-dimensional array of length M + Returns + ------- + output : 1-dimensional array of length M The output at position m is the m-th element of the first vector V_n for which C_n[m] is non-zero. Note that the output depends on the order of conditions, since the first satisfied condition is used. - Equivalent to: + Notes + ----- + Equivalent to: + :: output = [] for m in range(M): @@ -658,28 +824,76 @@ def select(condlist, choicelist, default=0): return choose(S, tuple(choicelist)) def copy(a): - """Return an array copy of the given object. + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + Notes + ----- + This is equivalent to + + >>> np.array(a, copy=True) + + Examples + -------- + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + """ return array(a, copy=True) # Basic operations def gradient(f, *varargs): - """Calculate the gradient of an N-dimensional scalar function. - - Uses central differences on the interior and first differences on boundaries - to give the same shape. + """ + Return the gradient of an N-dimensional array. - Inputs: + The gradient is computed using central differences in the interior + and first differences at the boundaries. The returned gradient hence has + the same shape as the input array. - f -- An N-dimensional array giving samples of a scalar function + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + `*varargs` : scalars + 0, 1, or N scalars specifying the sample distances in each direction, + that is: `dx`, `dy`, `dz`, ... The default distance is 1. - varargs -- 0, 1, or N scalars giving the sample distances in each direction - Outputs: + Returns + ------- + g : ndarray + N arrays of the same shape as `f` giving the derivative of `f` with + respect to each dimension. 
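The `varargs` spacing argument described above is not exercised by the example that follows; a possible additional doctest for the one-dimensional case (illustrative only):

    >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> np.gradient(f)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(f, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
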
- N arrays of the same shape as f giving the derivative of f with respect - to each dimension. + Examples + -------- + >>> np.gradient(np.array([[1,1],[3,4]])) + [array([[ 2., 3.], + [ 2., 3.]]), + array([[ 0., 0.], + [ 1., 1.]])] """ N = len(f.shape) # number of dimensions @@ -740,7 +954,38 @@ def gradient(f, *varargs): def diff(a, n=1, axis=-1): - """Calculate the nth order discrete difference along given axis. + """ + Calculate the nth order discrete difference along given axis. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. + axis : int, optional + The axis along which the difference is taken. + + Returns + ------- + out : ndarray + The `n` order differences. The shape of the output is the same as `a` + except along `axis` where the dimension is `n` less. + + Examples + -------- + >>> x = np.array([0,1,3,9,5,10]) + >>> np.diff(x) + array([ 1, 2, 6, -4, 5]) + >>> np.diff(x,n=2) + array([ 1, 4, -10, 9]) + >>> x = np.array([[1,3,6,10],[0,5,6,8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x,axis=0) + array([[-1, 2, 0, -2]]) + """ if n == 0: return a @@ -807,16 +1052,60 @@ except RuntimeError: def interp(x, xp, fp, left=None, right=None): - """Return the value of a piecewise-linear function at each value in x. + """ + One-dimensional linear interpolation. + + Returns the one-dimensional piecewise linear interpolant to a function + with given values at discrete data-points. + + Parameters + ---------- + x : array_like + The x-coordinates of the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing. + + fp : 1-D sequence of floats + The y-coordinates of the data points, same length as `xp`. - The piecewise-linear function, f, is defined by the known data-points - fp=f(xp). The xp points must be sorted in increasing order but this is - not checked. + left : float, optional + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : float, optional + Value to return for `x > xp[-1]`, defaults is `fp[-1]`. + + Returns + ------- + y : {float, ndarray} + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + + Notes + ----- + Does not check that the x-coordinate sequence `xp` is increasing. + If `xp` is not increasing, the results are nonsense. + A simple check for increasingness is:: + + np.all(np.diff(xp) > 0) + + + Examples + -------- + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([ 3. , 3. , 2.5, 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 - For values of x < xp[0] return the value given by left. If left is None, - then return fp[0]. - For values of x > xp[-1] return the value given by right. If right is - None, then return fp[-1]. """ if isinstance(x, (float, int, number)): return compiled_interp([x], xp, fp, left, right).item() @@ -826,13 +1115,26 @@ def interp(x, xp, fp, left=None, right=None): def angle(z, deg=0): """ - Return the angle of the complex argument z. + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False. Default is False. 
+ + Returns + ------- + angle : {ndarray, scalar} + The angle is defined as counterclockwise from the positive real axis on + the complex plane, with dtype as numpy.float64. Examples -------- - >>> np.angle(1+1j) # in radians - 0.78539816339744828 - >>> np.angle(1+1j,deg=True) # in degrees + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) + >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ @@ -850,8 +1152,26 @@ def angle(z, deg=0): return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): - """Unwrap radian phase p by changing absolute jumps greater than - 'discont' to their 2*pi complement along the given axis. + """ + Unwrap by changing deltas between values to 2*pi complement. + + Unwrap radian phase `p` by changing absolute jumps greater than + `discont` to their 2*pi complement along the given axis. + + Parameters + ---------- + p : array_like + Input array. + discont : float + Maximum discontinuity between values. + axis : integer + Axis along which unwrap will operate. + + Returns + ------- + out : ndarray + Output array + """ p = asarray(p) nd = len(p.shape) @@ -867,10 +1187,18 @@ def unwrap(p, discont=pi, axis=-1): return up def sort_complex(a): - """ Sort 'a' as a complex array using the real part first and then - the imaginary part if the real part is equal (the default sort order - for complex arrays). This function is a wrapper ensuring a complex - return type. + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. """ b = array(a,copy=True) @@ -886,7 +1214,16 @@ def sort_complex(a): return b def trim_zeros(filt, trim='fb'): - """ Trim the leading and trailing zeros from a 1D array. + """ + Trim the leading and trailing zeros from a 1D array. + + Parameters + ---------- + filt : array_like + Input array. + trim : string, optional + A string with 'f' representing trim from front and 'b' to trim from + back. Examples -------- @@ -914,12 +1251,25 @@ if sys.hexversion < 0x2040000: def unique(x): """ - Return sorted unique items from an array or sequence. + Return the sorted, unique elements of an array or sequence. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + y : ndarray + The sorted, unique elements are returned in a 1-D array. Examples -------- - >>> np.unique([5,2,4,0,4,4,2,2,1]) - array([0, 1, 2, 4, 5]) + >>> np.unique([1, 1, 2, 2, 3, 3]) + array([1, 2, 3]) + >>> a = np.array([[1, 1], [2, 3]]) + >>> np.unique(a) + array([1, 2, 3]) """ try: @@ -935,10 +1285,44 @@ def unique(x): return asarray(items) def extract(condition, arr): - """Return the elements of ravel(arr) where ravel(condition) is True - (in 1D). + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. 
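`unwrap` above is merged without an Examples section; a sketch of a doctest demonstrating the 2*pi correction (illustrative only):

    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> phase
    array([ 0.        ,  0.78539816,  1.57079633,  5.49778714,  6.28318531])
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])
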
+ + See Also + -------- + take, put, putmask + + Examples + -------- + >>> arr = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[False, False, True, False], + [False, True, False, False], + [ True, False, False, True]], dtype=bool) + >>> np.extract(condition, arr) + array([ 3, 6, 9, 12]) + + If `condition` is boolean: + + >>> arr[condition] + array([ 3, 6, 9, 12]) - Equivalent to compress(ravel(condition), ravel(arr)). """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) @@ -951,7 +1335,29 @@ def place(arr, mask, vals): return _insert(arr, mask, vals) def nansum(a, axis=None): - """Sum the array over the given axis, treating NaNs as 0. + """ + Sum the array along the given axis, treating NaNs as zero. + + Parameters + ---------- + a : array-like + Input array. + axis : {int, None}, optional + Axis along which the sum is computed. By default `a` is flattened. + + Returns + ------- + y : {ndarray, scalar} + The sum ignoring NaNs. + + Examples + -------- + >>> np.nansum([np.nan, 1]) + 1.0 + >>> a = np.array([[1, 1], [1, np.nan]]) + >>> np.nansum(a, axis=0) + array([ 2., 1.]) + """ y = array(a,subok=True) if not issubclass(y.dtype.type, _nx.integer): @@ -959,7 +1365,31 @@ def nansum(a, axis=None): return y.sum(axis) def nanmin(a, axis=None): - """Find the minimium over the given axis, ignoring NaNs. + """ + Find the minimum along the given axis, ignoring NaNs. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the minimum is computed. By default `a` is flattened. + + Returns + ------- + y : {ndarray, scalar} + The minimum ignoring NaNs. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([ 1., 2.]) + >>> np.nanmin(a, axis=1) + array([ 1., 3.]) + """ y = array(a,subok=True) if not issubclass(y.dtype.type, _nx.integer): @@ -967,7 +1397,12 @@ def nanmin(a, axis=None): return y.min(axis) def nanargmin(a, axis=None): - """Find the indices of the minimium over the given axis ignoring NaNs. + """ + Return indices of the minimum values along the given axis of `a`, + ignoring NaNs. + + Refer to `numpy.nanargmax` for detailed documentation. + """ y = array(a, subok=True) if not issubclass(y.dtype.type, _nx.integer): @@ -975,7 +1410,31 @@ def nanargmin(a, axis=None): return y.argmin(axis) def nanmax(a, axis=None): - """Find the maximum over the given axis ignoring NaNs. + """ + Find the maximum along the given axis, ignoring NaNs. + + Parameters + ---------- + a : array-like + Input array. + axis : {int, None}, optional + Axis along which the maximum is computed. By default `a` is flattened. + + Returns + ------- + y : {ndarray, scalar} + The maximum ignoring NaNs. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + 3.0 + >>> np.nanmax(a, axis=0) + array([ 3., 2.]) + >>> np.nanmax(a, axis=1) + array([ 2., 3.]) + """ y = array(a, subok=True) if not issubclass(y.dtype.type, _nx.integer): @@ -983,7 +1442,38 @@ def nanmax(a, axis=None): return y.max(axis) def nanargmax(a, axis=None): - """Find the maximum over the given axis ignoring NaNs. + """ + Return indices of the maximum values over the given axis of 'a', + ignoring NaNs. + + Parameters + ---------- + a : array-like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. 
+ + Returns + ------- + index_array : {ndarray, int} + An array of indices or a single index value. + + See Also + -------- + argmax + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 1]) + >>> np.nanargmax(a, axis=1) + array([1, 0]) + """ y = array(a,subok=True) if not issubclass(y.dtype.type, _nx.integer): @@ -1144,20 +1634,77 @@ class vectorize(object): return _res def cov(m, y=None, rowvar=1, bias=0): - """Estimate the covariance matrix. + """ + Estimate a covariance matrix, given data. - If m is a vector, return the variance. For matrices return the - covariance matrix. + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. - If y is given it is treated as an additional (set of) - variable(s). + Parameters + ---------- + m : array-like + A 1D or 2D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array-like, optional + An additional set of variables and observations. `y` has the same + form as that of `m`. + rowvar : int, optional + If `rowvar` is non-zero (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : int, optional + Default normalization is by ``(N-1)``, where ``N`` is the number of + observations given (unbiased estimate). If `bias` is 1, then + normalization is by ``N``. - Normalization is by (N-1) where N is the number of observations - (unbiased estimate). If bias is 1 then normalization is by N. + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Examples + -------- + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. + + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.vstack((x,y)) + >>> print np.cov(X) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print np.cov(x, y) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print np.cov(x) + 11.71 - If rowvar is non-zero (default), then each row is a variable with - observations in the columns, otherwise each column - is a variable and the observations are in the rows. """ X = array(m, ndmin=2, dtype=float) @@ -1192,7 +1739,21 @@ def cov(m, y=None, rowvar=1, bias=0): return (dot(X, X.T.conj()) / fact).squeeze() def corrcoef(x, y=None, rowvar=1, bias=0): - """The correlation coefficients + """ + Correlation coefficients. + + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, P, and the + covariance matrix, C, is + + .. 
math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } + + The values of P are between -1 and 1. + + See Also + -------- + cov : Covariance matrix + """ c = cov(x, y, rowvar, bias) try: @@ -1202,7 +1763,88 @@ def corrcoef(x, y=None, rowvar=1, bias=0): return c/sqrt(multiply.outer(d,d)) def blackman(M): - """blackman(M) returns the M-point Blackman window. + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the the first + three terms of a summation of cosines. It was designed to have close + to the minimal leakage possible. + It is close to optimal, only slightly worse than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The window, normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. [3] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + + Examples + -------- + >>> from numpy import blackman + >>> blackman(12) + array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) + + + Plot the window and the frequency response: + + >>> from numpy import clip, log10, array, bartlett + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = blackman(51) + >>> plt.plot(window) + >>> plt.title("Blackman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.show() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = abs(fftshift(A)) + >>> freq = linspace(-0.5,0.5,len(A)) + >>> response = 20*log10(mag) + >>> response = clip(response,-100,100) + >>> plt.plot(freq, response) + >>> plt.title("Frequency response of Bartlett window") + >>> plt.ylabel("Magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axis('tight'); plt.show() + """ if M < 1: return array([]) @@ -1241,7 +1883,7 @@ def bartlett(M): ----- The Bartlett window is defined as - .. math:: w(n) = \\frac{2}{M-1} \left( + .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) @@ -1251,19 +1893,24 @@ def bartlett(M): window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or - tapering function. + tapering function. The fourier transform of the Bartlett is the product + of two sinc functions. 
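As a quick numerical check of the definition above (a minimal sketch, assuming
only that NumPy is imported as ``np``), the window can be reproduced directly
from the formula:

>>> import numpy as np
>>> M = 7
>>> n = np.arange(M)
>>> w = 2.0/(M - 1) * ((M - 1)/2.0 - abs(n - (M - 1)/2.0))
>>> np.allclose(w, np.bartlett(M))
True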
+ Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. - .. [2] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. - .. [3] Wikipedia, "Window function", + .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. + Examples -------- >>> np.bartlett(12) @@ -1273,26 +1920,27 @@ def bartlett(M): Plot the window and its frequency response (requires SciPy and matplotlib): - from scipy.fftpack import fft - from matplotlib import pyplot as plt - - window = np.bartlett(51) - plt.plot(window) #doctest: SKIP - plt.title("Bartlett window") - plt.ylabel("Amplitude") - plt.xlabel("Sample") - plt.show() - - A = fft(window, 2048) / 25.5 - mag = abs(np.fft.fftshift(A)) - freq = linspace(-0.5,0.5,len(A)) - response = 20*np.log10(mag) - response = np.clip(response,-100,100) - plt.plot(freq, response) - plt.title("Frequency response of Bartlett window") - plt.ylabel("Magnitude [dB]") - plt.xlabel("Normalized frequency [cycles per sample]") - plt.axis('tight'); plt.show() + >>> from numpy import clip, log10, array, bartlett + >>> from numpy.fft import fft + >>> import matplotlib.pyplot as plt + + >>> window = bartlett(51) + >>> plt.plot(window) + >>> plt.title("Bartlett window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.show() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = abs(fftshift(A)) + >>> freq = linspace(-0.5,0.5,len(A)) + >>> response = 20*log10(mag) + >>> response = clip(response,-100,100) + >>> plt.plot(freq, response) + >>> plt.title("Frequency response of Bartlett window") + >>> plt.ylabel("Magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axis('tight'); plt.show() """ if M < 1: @@ -1303,7 +1951,87 @@ def bartlett(M): return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1)) def hanning(M): - """hanning(M) returns the M-point Hanning window. + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The window, normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius van Hann, an Austrian meterologist. It is + also known as the Cosine Bell. Some authors prefer that it be called a + Hann window, to help avoid confusion with the very similar Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. 
smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> from numpy import hanning + >>> hanning(12) + array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response: + + >>> from numpy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = np.hanning(51) + >>> plt.subplot(121) + >>> plt.plot(window) + >>> plt.title("Hann window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> A = fft(window, 2048) / 25.5 + >>> mag = abs(fftshift(A)) + >>> freq = np.linspace(-0.5,0.5,len(A)) + >>> response = 20*np.log10(mag) + >>> response = np.clip(response,-100,100) + >>> plt.subplot(122) + >>> plt.plot(freq, response) + >>> plt.title("Frequency response of the Hann window") + >>> plt.ylabel("Magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axis('tight'); plt.show() + """ if M < 1: return array([]) @@ -1313,7 +2041,86 @@ def hanning(M): return 0.5-0.5*cos(2.0*pi*n/(M-1)) def hamming(M): - """hamming(M) returns the M-point Hamming window. + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 + 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and + is described in Blackman and Tukey. It was recommended for smoothing the + truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
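A small numerical sanity check before the plotting examples below (assuming
only that NumPy is imported as ``np``): the window tapers to 0.08 at both ends
and reaches one at the center.

>>> import numpy as np
>>> w = np.hamming(11)
>>> np.allclose([w[0], w[5], w[-1]], [0.08, 1.0, 0.08])
True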
+ + Examples + -------- + >>> from numpy import hamming + >>> hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response: + + >>> from numpy import clip, log10, array, hamming + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = hamming(51) + >>> plt.plot(window) + >>> plt.title("Hamming window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.show() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = abs(fftshift(A)) + >>> freq = linspace(-0.5,0.5,len(A)) + >>> response = 20*log10(mag) + >>> response = clip(response,-100,100) + >>> plt.plot(freq, response) + >>> plt.title("Frequency response of Hamming window") + >>> plt.ylabel("Magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axis('tight'); plt.show() + """ if M < 1: return array([]) @@ -1401,6 +2208,27 @@ def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): + """ + Modified Bessel function of the first kind, order 0, :math:`I_0` + + Parameters + ---------- + x : array-like, dtype float or complex + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape z.shape, dtype z.dtype + The modified Bessel function evaluated for all elements of `x`. + + Examples + -------- + >>> np.i0([0.]) + array(1.0) + >>> np.i0([0., 1. + 2j]) + array([ 1.00000000+0.j , 0.18785373+0.64616944j]) + + """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x<0) @@ -1414,8 +2242,117 @@ def i0(x): ## End of cephes code for i0 def kaiser(M,beta): - """kaiser(M, beta) returns a Kaiser window of length M with shape parameter - beta. + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple approximation + to the DPSS window based on Bessel functions. + The Kaiser window is a very good approximation to the Digital Prolate + Spheroidal Sequence, or Slepian window, which is the transform which + maximizes the energy in the main lobe of the window relative to total + energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise nans will + get returned. 
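A minimal sketch of the limiting case in the table above (assuming only that
NumPy is imported as ``np``): with ``beta = 0`` the Kaiser window reduces to a
rectangular window.

>>> import numpy as np
>>> np.kaiser(5, 0)
array([ 1.,  1.,  1.,  1.,  1.])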
+ + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2]\tE.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + + Examples + -------- + >>> from numpy import kaiser + >>> kaiser(12, 14) + array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + + + Plot the window and the frequency response: + + >>> from numpy import clip, log10, array, kaiser + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = kaiser(51, 14) + >>> plt.plot(window) + >>> plt.title("Kaiser window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.show() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = abs(fftshift(A)) + >>> freq = linspace(-0.5,0.5,len(A)) + >>> response = 20*log10(mag) + >>> response = clip(response,-100,100) + >>> plt.plot(freq, response) + >>> plt.title("Frequency response of Kaiser window") + >>> plt.ylabel("Magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axis('tight'); plt.show() + """ from numpy.dual import i0 n = arange(0,M) @@ -1423,7 +2360,74 @@ def kaiser(M,beta): return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta) def sinc(x): - """sinc(x) returns sin(pi*x)/(pi*x) at all points of array x. + """ + Return the sinc function. + + The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. + + Parameters + ---------- + x : ndarray + Array (possibly multi-dimensional) of values for which to to + calculate ``sinc(x)``. + + Returns + ------- + out : ndarray + ``sinc(x)``, which has the same shape as the input. + + Notes + ----- + ``sinc(0)`` is the limit value 1. + + The name sinc is short for "sine cardinal" or "sinus cardinalis". + + The sinc function is used in various signal processing applications, + including in anti-aliasing, in the construction of a + Lanczos resampling filter, and in interpolation. + + For bandlimited interpolation of discrete-time signals, the ideal + interpolation kernel is proportional to the sinc function. + + References + ---------- + .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web + Resource. http://mathworld.wolfram.com/SincFunction.html + .. [2] Wikipedia, "Sinc function", + http://en.wikipedia.org/wiki/Sinc_function + + Examples + -------- + >>> x = np.arange(-20., 21.)/5. 
+ >>> np.sinc(x) + array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, + -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, + -4.92362781e-02, -3.89804309e-17]) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, sinc(x)) + >>> plt.title("Sinc Function") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("X") + >>> plt.show() + + It works in 2-D as well: + + >>> x = np.arange(-200., 201.)/50. + >>> xx = np.outer(x, x) + >>> plt.imshow(sinc(xx)) + """ y = pi* where(x == 0, 1.0e-20, x) return sin(y)/y @@ -1434,7 +2438,8 @@ def msort(a): return b def median(a, axis=0, out=None, overwrite_input=False): - """Compute the median along the specified axis. + """ + Compute the median along the specified axis. Returns the median of the array elements. The median is taken over the first axis of the array by default, otherwise over @@ -1442,44 +2447,44 @@ def median(a, axis=0, out=None, overwrite_input=False): Parameters ---------- - a : array-like - Input array or object that can be converted to an array + a : array_like + Input array or object that can be converted to an array. axis : {int, None}, optional Axis along which the medians are computed. The default is to - compute the median along the first dimension. axis=None - returns the median of the flattened array - + compute the median along the first dimension. If `axis` is + set to None, return the median of the flattened array. out : ndarray, optional Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. overwrite_input : {False, True}, optional If True, then allow use of memory of input array (a) for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is - False. Note that, if overwrite_input is true, and the input + False. Note that, if `overwrite_input` is True and the input is not already an ndarray, an error will be raised. Returns ------- - median : ndarray. - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. - Return datatype is float64 for ints and floats smaller than - float64, or the input datatype otherwise. + median : ndarray + A new array holding the result (unless `out` is specified, in + which case that array is returned instead). If the input contains + integers, or floats of smaller precision than 64, then the output + data-type is float64. Otherwise, the output data-type is the same + as that of the input. See Also - ------- + -------- mean Notes ----- - Given a vector V length N, the median of V is the middle value of - a sorted copy of V (Vs) - i.e. 
Vs[(N-1)/2], when N is odd. It is - the mean of the two middle values of Vs, when N is even. + Given a vector V of length N, the median of V is the middle value of + a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is + odd. When N is even, it is the average of the two middle values of + ``V_sorted``. Examples -------- @@ -1507,6 +2512,7 @@ def median(a, axis=0, out=None, overwrite_input=False): >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) + """ if overwrite_input: if axis is None: @@ -1531,8 +2537,29 @@ def median(a, axis=0, out=None, overwrite_input=False): return mean(sorted[indexer], axis=axis, out=out) def trapz(y, x=None, dx=1.0, axis=-1): - """Integrate y(x) using samples along the given axis and the composite - trapezoidal rule. If x is None, spacing given by dx is assumed. + """ + Integrate along the given axis using the composite trapezoidal rule. + + Integrate `y` (`x`) along given axis. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : {array_like, None}, optional + If `x` is None, then spacing between all `y` elements is 1. + dx : scalar, optional + If `x` is None, spacing given by `dx` is assumed. + axis : int, optional + Specify the axis. + + Examples + -------- + >>> np.trapz([1,2,3]) + >>> 4.0 + >>> np.trapz([1,2,3], [4,6,8]) + >>> 8.0 + """ y = asarray(y) if x is None: @@ -1579,27 +2606,44 @@ def add_newdoc(place, obj, doc): # From matplotlib def meshgrid(x,y): """ - For vectors x, y with lengths Nx=len(x) and Ny=len(y), return X, Y - where X and Y are (Ny, Nx) shaped arrays with the elements of x - and y repeated to fill the matrix + Return coordinate matrices from two coordinate vectors. + + - EG, - [X, Y] = meshgrid([1,2,3], [4,5,6,7]) + Parameters + ---------- + x, y : ndarray + Two 1D arrays representing the x and y coordinates - X = - 1 2 3 - 1 2 3 - 1 2 3 - 1 2 3 + Returns + ------- + X, Y : ndarray + For vectors `x`, `y` with lengths Nx=len(`x`) and Ny=len(`y`), + return `X`, `Y` where `X` and `Y` are (Ny, Nx) shaped arrays + with the elements of `x` and y repeated to fill the matrix along + the first dimension for `x`, the second for `y`. + See Also + -------- + numpy.mgrid : Construct a multi-dimensional "meshgrid" + using indexing notation. + + Examples + -------- + >>> X, Y = numpy.meshgrid([1,2,3], [4,5,6,7]) + >>> X + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + >>> Y + array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]]) - Y = - 4 4 4 - 5 5 5 - 6 6 6 - 7 7 7 - """ + """ x = asarray(x) y = asarray(y) numRows, numCols = len(y), len(x) # yes, reversed @@ -1611,30 +2655,41 @@ def meshgrid(x,y): return X, Y def delete(arr, obj, axis=None): - """Return a new array with sub-arrays along an axis deleted. - - Return a new array with the sub-arrays (i.e. rows or columns) - deleted along the given axis as specified by obj + """ + Return a new array with sub-arrays along an axis deleted. - obj may be a slice_object (s_[3:5:2]) or an integer - or an array of integers indicated which sub-arrays to - remove. + Parameters + ---------- + arr : array-like + Input array. + obj : slice, integer or an array of integers + Indicate which sub-arrays to remove. + axis : integer or None + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. - If axis is None, then ravel the array first. + See Also + -------- + insert : Insert values into an array. + append : Append values at the end of an array. 
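One point worth illustrating before the examples below: `delete` never
modifies its input, it always allocates and returns a new array (a minimal
sketch, assuming only that NumPy is imported as ``np``).

>>> import numpy as np
>>> a = np.array([1, 2, 3, 4])
>>> np.delete(a, 0)
array([2, 3, 4])
>>> a                          # the original array is unchanged
array([1, 2, 3, 4])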
Examples -------- - >>> arr = [[3,4,5], - ... [1,2,3], - ... [6,7,8]] - - >>> np.delete(arr, 1, 1) - array([[3, 5], - [1, 3], - [6, 8]]) + >>> arr = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) - array([[3, 4, 5], - [6, 7, 8]]) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + """ wrap = None if type(arr) is not ndarray: @@ -1718,26 +2773,33 @@ def delete(arr, obj, axis=None): return new def insert(arr, obj, values, axis=None): - """Return a new array with values inserted along the given axis - before the given indices - - If axis is None, then ravel the array first. + """ + Insert values along the given axis before the given indices. - The obj argument can be an integer, a slice, or a sequence of - integers. + Parameters + ---------- + arr : array_like + Input array. + obj : {integer, slice, integer array_like} + Insert `values` before `obj` indices. + values : + Values to insert into `arr`. + axis : int, optional + Axis along which to insert `values`. If `axis` is None then ravel + `arr` first. Examples -------- >>> a = np.array([[1,2,3], ... [4,5,6], ... [7,8,9]]) - >>> np.insert(a, [1,2], [[4],[5]], axis=0) array([[1, 2, 3], [4, 4, 4], [4, 5, 6], [5, 5, 5], [7, 8, 9]]) + """ wrap = None if type(arr) is not ndarray: @@ -1807,7 +2869,38 @@ def insert(arr, obj, values, axis=None): return new def append(arr, values, axis=None): - """Append to the end of an array along axis (ravel first if None) + """ + Append values to the end of an array. + + Parameters + ---------- + arr : array_like + Values are appended to a copy of this array. + values : array_like + These values are appended to a copy of `arr`. It must be of the + correct shape (the same shape as `arr`, excluding `axis`). If `axis` + is not specified, `values` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `values` are appended. If `axis` is not given, + both `arr` and `values` are flattened before use. + + Returns + ------- + out : ndarray + A copy of `arr` with `values` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. + + Examples + -------- + >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + + >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + """ arr = asanyarray(arr) if axis is None: diff --git a/numpy/lib/getlimits.py b/numpy/lib/getlimits.py index cab741e2e..4c432c0e4 100644 --- a/numpy/lib/getlimits.py +++ b/numpy/lib/getlimits.py @@ -21,7 +21,46 @@ _convert_to_float = { } class finfo(object): - """ Machine limits for floating point types. + """ + Machine limits for floating point types. + + Attributes + ---------- + eps : floating point number of the appropriate type + The smallest representable number such that ``1.0 + eps != 1.0``. + epsneg : floating point number of the appropriate type + The smallest representable number such that ``1.0 - epsneg != 1.0``. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machar : MachAr + The object which calculated these parameters and holds more detailed + information. + machep : int + The exponent that yields ``eps``. 
+ max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there being + no leading 0s in the mantissa. + negep : int + The exponent that yields ``epsneg``. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of bits in the mantissa. + precision : int + The approximate number of decimal digits to which this kind of float + is precise. + resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e. + ``10**-precision``. + tiny : floating point number of the appropriate type + The smallest-magnitude usable number. Parameters ---------- @@ -30,14 +69,18 @@ class finfo(object): See Also -------- - numpy.lib.machar.MachAr + numpy.lib.machar.MachAr : + The implementation of the tests that produce this information. + iinfo : + The equivalent for integer data types. Notes ----- For developers of numpy: do not instantiate this at the module level. The initial calculation of these parameters is expensive and negatively impacts - import times. These objects are cached, so calling `finfo()` repeatedly + import times. These objects are cached, so calling ``finfo()`` repeatedly inside your functions is not a problem. + """ _finfo_cache = {} @@ -126,10 +169,47 @@ nexp =%(nexp)6s min= -max class iinfo: - """Limits for integer types. + """ + Machine limits for integer types. + + Attributes + ---------- + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. + + Examples + -------- + With types: + + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: - :Parameters: - type : integer type or instance + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 """ diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 2024584d4..af87b24a6 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -17,19 +17,41 @@ makemat = matrix.matrix # contributed by Stefan van der Walt def unravel_index(x,dims): - """Convert a flat index into an index tuple for an array of given shape. + """ + Convert a flat index into an index tuple for an array of given shape. e.g. for a 2x2 array, unravel_index(2,(2,2)) returns (1,0). - Example usage: - p = x.argmax() - idx = unravel_index(p,x.shape) - x[idx] == x.max() + Parameters + ---------- + x : int + Flattened index. + dims : shape tuple + Input shape. - Note: x.flat[p] == x.max() + Notes + ----- + Since x.flat[p] == x.max() it may be easier to use flattened indexing + than to re-map the index to a tuple. 
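A minimal round-trip sketch of the relationship mentioned in the note above
(assuming only that NumPy is imported as ``np``):

>>> import numpy as np
>>> x = np.arange(12).reshape(3, 4)
>>> idx = np.unravel_index(7, x.shape)
>>> idx
(1, 3)
>>> x[idx] == x.flat[7]
True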
+ + Examples + -------- + >>> x = np.ones((5,4)) + >>> x + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15], + [16, 17, 18, 19]]) + >>> p = x.argmax() + >>> p + 19 + >>> idx = np.unravel_index(p, x.shape) + >>> idx + (4, 3) + >>> x[idx] == x.max() + True - Thus, it may be easier to use flattened indexing than to re-map - the index to a tuple. """ if x > _nx.prod(dims)-1 or x < 0: raise ValueError("Invalid index, must be 0 <= x <= number of elements.") @@ -92,7 +114,7 @@ class nd_grid(object): complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 5j), then the - integer part of it's magnitude is interpreted as specifying the + integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. @@ -342,9 +364,17 @@ c_ = CClass() class ndenumerate(object): """ - A simple nd index iterator over an array. + Multidimensional index iterator. - Example: + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + a : ndarray + Input array. + + Examples + -------- >>> a = np.array([[1,2],[3,4]]) >>> for index, x in np.ndenumerate(a): ... print index, x @@ -352,6 +382,7 @@ class ndenumerate(object): (0, 1) 2 (1, 0) 3 (1, 1) 4 + """ def __init__(self, arr): self.iter = asarray(arr).flat diff --git a/numpy/lib/io.py b/numpy/lib/io.py index cc9cf65e0..41ffc51b8 100644 --- a/numpy/lib/io.py +++ b/numpy/lib/io.py @@ -80,33 +80,43 @@ class NpzFile(object): raise KeyError, "%s is not a file in the archive" % key def load(file, memmap=False): - """Load a binary file. - - Read a binary file (either a pickle, or a binary .npy/.npz file) and - return the result. + """ + Load pickled, ``.npy``, and ``.npz`` binary files. Parameters ---------- file : file-like object or string - the file to read. It must support seek and read methods + The file to read. It must support seek and read methods. memmap : bool - If true, then memory-map the .npy file or unzip the .npz file into - a temporary directory and memory-map each component - This has no effect for a pickle. + If True, then memory-map the ``.npy`` file (or unzip the ``.npz`` file + into a temporary directory and memory-map each component). This has no + effect for a pickled file. Returns ------- result : array, tuple, dict, etc. - data stored in the file. - If file contains pickle data, then whatever is stored in the pickle is - returned. - If the file is .npy file, then an array is returned. - If the file is .npz file, then a dictionary-like object is returned - which has a filename:array key:value pair for every file in the zip. + Data stored in the file. + + - If file contains pickle data, then whatever is stored in the + pickle is returned. + + - If the file is a ``.npy`` file, then an array is returned. + + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing {filename: array} key-value pairs, one for + every file in the archive. Raises ------ IOError + If the input file does not exist or cannot be read. + + Examples + -------- + >>> np.save('/tmp/123', np.array([1, 2, 3]) + >>> np.load('/tmp/123.npy') + array([1, 2, 3]) + """ if isinstance(file, basestring): fid = _file(file,"rb") @@ -133,19 +143,29 @@ def load(file, memmap=False): "Failed to interpret file %s as a pickle" % repr(file) def save(file, arr): - """Save an array to a binary file (a string or file-like object). 
- - If the file is a string, then if it does not have the .npy extension, - it is appended and a file open. + """ + Save an array to a binary file in NumPy format. - Data is saved to the open file in NumPy-array format + Parameters + ---------- + f : file or string + File or filename to which the data is saved. If the filename + does not already have a ``.npy`` extension, it is added. + x : array_like + Array data. Examples -------- - import numpy as np - ... - np.save('myfile', a) - a = np.load('myfile.npy') + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> outfile.seek(0) + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + """ if isinstance(file, basestring): if not file.endswith('.npy'): @@ -226,53 +246,70 @@ def _string_like(obj): def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False): """ - Load ASCII data from fname into an array and return the array. + Load data from a text file. - The data must be regular, same number of values in every row + Each row in the text file must have the same number of values. Parameters ---------- - fname : filename or a file handle. - Support for gzipped files is automatic, if the filename ends in .gz - + fname : file or string + File or filename to read. If the filename extension is ``.gz``, + the file is first decompressed. dtype : data-type - Data type of the resulting array. If this is a record data-type, the - resulting array will be 1-d and each row will be interpreted as an - element of the array. The number of columns used must match the number - of fields in the data-type in this case. - - comments : str - The character used to indicate the start of a comment in the file. - - delimiter : str - A string-like character used to separate values in the file. If delimiter - is unspecified or none, any whitespace string is a separator. - + Data type of the resulting array. If this is a record data-type, + the resulting array will be 1-dimensional, and each row will be + interpreted as an element of the array. In this case, the number + of columns used must match the number of fields in the data-type. + comments : string, optional + The character used to indicate the start of a comment. + delimiter : string, optional + The string used to separate values. By default, this is any + whitespace. converters : {} - A dictionary mapping column number to a function that will convert that - column to a float. Eg, if column 0 is a date string: - converters={0:datestr2num}. Converters can also be used to provide - a default value for missing data: converters={3:lambda s: float(s or 0)}. - + A dictionary mapping column number to a function that will convert + that column to a float. E.g., if column 0 is a date string: + ``converters = {0: datestr2num}``. Converters can also be used to + provide a default value for missing data: + ``converters = {3: lambda s: float(s or 0)}``. skiprows : int - The number of rows from the top to skip. - + Skip the first `skiprows` lines. usecols : sequence - A sequence of integer column indexes to extract where 0 is the first - column, eg. usecols=(1,4,5) will extract the 2nd, 5th and 6th columns. - + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. unpack : bool - If True, will transpose the matrix allowing you to unpack into named - arguments on the left hand side. 
+ If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)`` + + Returns + ------- + out : ndarray + Data read from the text file. + + See Also + -------- + scipy.io.loadmat : reads Matlab(R) data files Examples -------- - >>> X = loadtxt('test.dat') # data in two columns - >>> x,y,z = load('somefile.dat', usecols=(3,5,7), unpack=True) - >>> r = np.loadtxt('record.dat', dtype={'names':('gender','age','weight'), - ... 'formats': ('S1','i4', 'f4')}) + >>> from StringIO import StringIO # StringIO behaves like a file object + >>> c = StringIO("0 1\\n2 3") + >>> np.loadtxt(c) + array([[ 0., 1.], + [ 2., 3.]]) + + >>> d = StringIO("M 21 72\\nF 35 58") + >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), + ... 'formats': ('S1', 'i4', 'f4')}) + array([('M', 21, 72.0), ('F', 35, 58.0)], + dtype=[('gender', '|S1'), ('age', '>> c = StringIO("1,0,2\\n3,0,4") + >>> x,y = np.loadtxt(c, delimiter=',', usecols=(0,2), unpack=True) + >>> x + array([ 1., 3.]) + >>> y + array([ 2., 4.]) - SeeAlso: scipy.io.loadmat to read and write matfiles. """ user_converters = converters @@ -373,8 +410,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, def savetxt(fname, X, fmt='%.18e',delimiter=' '): """ - Save the data in X to file fname using fmt string to convert the - data to strings + Save an array to file. Parameters ---------- @@ -382,8 +418,8 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '): If the filename ends in .gz, the file is automatically saved in compressed gzip format. The load() command understands gzipped files transparently. - X : array or sequence - Data to write to file. + X : array_like + Data. fmt : string or sequence of strings A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which @@ -391,43 +427,59 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '): delimiter : str Character separating columns. - Examples - -------- - >>> np.savetxt('test.out', x, delimiter=',') # X is an array - >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays - >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation + Notes + ----- + Further explanation of the `fmt` parameter + (``%[flag]width[.precision]specifier``): - Notes on fmt - ------------ flags: - - : left justify - + : Forces to preceed result with + or -. - 0 : Left pad the number with zeros instead of space (see width). + ``-`` : left justify + + ``+`` : Forces to preceed result with + or -. + + ``0`` : Left pad the number with zeros instead of space (see width). width: - Minimum number of characters to be printed. The value is not truncated. + Minimum number of characters to be printed. The value is not truncated + if it has more characters. precision: - - For integer specifiers (eg. d,i,o,x), the minimum number of + - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - - For e, E and f specifiers, the number of digits to print + - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - - For g and G, the maximum number of significant digits. - - For s, the maximum number of charac ters. + - For ``g`` and ``G``, the maximum number of significant digits. + - For ``s``, the maximum number of characters. specifiers: - c : character - d or i : signed decimal integer - e or E : scientific notation with e or E. 
- f : decimal floating point - g,G : use the shorter of e,E or f - o : signed octal - s : string of characters - u : unsigned decimal integer - x,X : unsigned hexadecimal integer + ``c`` : character + + ``d`` or ``i`` : signed decimal integer + + ``e`` or ``E`` : scientific notation with ``e`` or ``E``. + + ``f`` : decimal floating point + + ``g,G`` : use the shorter of ``e,E`` or ``f`` + + ``o`` : signed octal + + ``s`` : string of characters + + ``u`` : unsigned decimal integer + + ``x,X`` : unsigned hexadecimal integer This is not an exhaustive specification. + + + Examples + -------- + >>> savetxt('test.out', x, delimiter=',') # X is an array + >>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays + >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation + """ if _string_like(fname): @@ -478,8 +530,7 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '): import re def fromregex(file, regexp, dtype): """ - Construct an array from a text file, using regular-expressions - parsing. + Construct an array from a text file, using regular-expressions parsing. Array is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields. diff --git a/numpy/lib/machar.py b/numpy/lib/machar.py index 72fde37f2..facade612 100644 --- a/numpy/lib/machar.py +++ b/numpy/lib/machar.py @@ -13,40 +13,62 @@ from numpy.core.numeric import seterr # Need to speed this up...especially for longfloat class MachAr(object): - """Diagnosing machine parameters. + """ + Diagnosing machine parameters. - The following attributes are available: + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, substracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating point number ``beta**minexp`` (the smallest [in + magnitude] usable floating value). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. + ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. 
- ibeta - radix in which numbers are represented - it - number of base-ibeta digits in the floating point mantissa M - machep - exponent of the smallest (most negative) power of ibeta that, - added to 1.0, - gives something different from 1.0 - eps - floating-point number beta**machep (floating point precision) - negep - exponent of the smallest power of ibeta that, substracted - from 1.0, gives something different from 1.0 - epsneg - floating-point number beta**negep - iexp - number of bits in the exponent (including its sign and bias) - minexp - smallest (most negative) power of ibeta consistent with there - being no leading zeros in the mantissa - xmin - floating point number beta**minexp (the smallest (in - magnitude) usable floating value) - maxexp - smallest (positive) power of ibeta that causes overflow - xmax - (1-epsneg)* beta**maxexp (the largest (in magnitude) - usable floating value) - irnd - in range(6), information on what kind of rounding is done - in addition, and on how underflow is handled - ngrd - number of 'guard digits' used when truncating the product - of two mantissas to fit the representation + epsilon : float + Same as `eps`. + tiny : float + Same as `xmin`. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + `` - 10**(-precision)`` - epsilon - same as eps - tiny - same as xmin - huge - same as xmax - precision - int(-log10(eps)) - resolution - 10**(-precision) + References + ---------- + .. [1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. - Reference: - Numerical Recipies. """ def __init__(self, float_conv=float,int_conv=int, float_to_float=float, diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 8fb0337dc..bce7f0e4e 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -46,15 +46,49 @@ def _lstsq(X, y, rcond): return lstsq(X, y, rcond) def poly(seq_of_zeros): - """ Return a sequence representing a polynomial given a sequence of roots. + """ + Return polynomial coefficients given a sequence of roots. + + Calculate the coefficients of a polynomial given the zeros + of the polynomial. + + If a square matrix is given, then the coefficients for + characteristic equation of the matrix, defined by + :math:`\\mathrm{det}(\\mathbf{A} - \\lambda \\mathbf{I})`, + are returned. + + Parameters + ---------- + seq_of_zeros : ndarray + A sequence of polynomial roots or a square matrix. + + Returns + ------- + coefs : ndarray + A sequence of polynomial coefficients representing the polynomial + + :math:`\\mathrm{coefs}[0] x^{n-1} + \\mathrm{coefs}[1] x^{n-2} + + ... + \\mathrm{coefs}[2] x + \\mathrm{coefs}[n]` + + See Also + -------- + numpy.poly1d : A one-dimensional polynomial class. + numpy.roots : Return the roots of the polynomial coefficients in p + numpy.polyfit : Least squares polynomial fit + + Examples + -------- + Given a sequence of polynomial zeros, - If the input is a matrix, return the characteristic polynomial. + >>> b = np.roots([1, 3, 1, 5, 6]) + >>> np.poly(b) + array([ 1., 3., 1., 5., 6.]) - Example: + Given a square matrix, - >>> b = np.roots([1,3,1,5,6]) - >>> np.poly(b) - array([ 1., 3., 1., 5., 6.]) + >>> P = np.array([[19, 3], [-2, 26]]) + >>> np.poly(P) + array([ 1., -45., 500.]) """ seq_of_zeros = atleast_1d(seq_of_zeros) @@ -86,11 +120,35 @@ def poly(seq_of_zeros): return a def roots(p): - """ Return the roots of the polynomial coefficients in p. 
+ """ + Return the roots of a polynomial with coefficients given in p. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : (N,) array_like + Rank-1 array of polynomial co-efficients. + + Returns + ------- + out : ndarray + An array containing the complex roots of the polynomial. + + Raises + ------ + ValueError: + When `p` cannot be converted to a rank-1 array. + + Examples + -------- + + >>> coeff = [3.2, 2, 1] + >>> print np.roots(coeff) + [-0.3125+0.46351241j -0.3125-0.46351241j] - The values in the rank-1 array p are coefficients of a polynomial. - If the length of p is n+1 then the polynomial is - p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] """ # If input is scalar, this makes it an array p = atleast_1d(p) @@ -128,12 +186,70 @@ def roots(p): return roots def polyint(p, m=1, k=None): - """Return the mth analytical integral of the polynomial p. + """ + Return an antiderivative (indefinite integral) of a polynomial. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : {None, list of `m` scalars, scalar}, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + The defining property of the antiderivative: + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3., 5., 3. ]) + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 - If k is None, then zero-valued constants of integration are used. - otherwise, k should be a list of length m (or a scalar if m=1) to - represent the constants of integration to use for each integration - (starting with k[0]) """ m = int(m) if m < 0: @@ -160,7 +276,55 @@ def polyint(p, m=1, k=None): return val def polyder(p, m=1): - """Return the mth derivative of the polynomial p. + """ + Return the derivative of order m of a polynomial. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. 
+ + See Also + -------- + polyint : Anti-derivative of a polynomial. + + Examples + -------- + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([ 0.]) + """ m = int(m) truepoly = isinstance(p, poly1d) @@ -178,107 +342,134 @@ def polyder(p, m=1): return val def polyfit(x, y, deg, rcond=None, full=False): - """Least squares polynomial fit. - - Do a best fit polynomial of degree 'deg' of 'x' to 'y'. Return value is a - vector of polynomial coefficients [pk ... p1 p0]. Eg, for n=2 + """ + Least squares polynomial fit. - p2*x0^2 + p1*x0 + p0 = y1 - p2*x1^2 + p1*x1 + p0 = y1 - p2*x2^2 + p1*x2 + p0 = y2 - ..... - p2*xk^2 + p1*xk + p0 = yk + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error. Parameters ---------- - x : array_like - 1D vector of sample points. - y : array_like - 1D vector or 2D array of values to fit. The values should run down the - columes in the 2D case. - deg : integer + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int Degree of the fitting polynomial - rcond: {None, float}, optional + rcond : float, optional Relative condition number of the fit. Singular values smaller than this - relative to the largest singular value will be ignored. The defaul value - is len(x)*eps, where eps is the relative precision of the float type, - about 2e-16 in most cases. - full : {False, boolean}, optional - Switch determining nature of return value. When it is False just the - coefficients are returned, when True diagnostic information from the - singular value decomposition is also returned. + relative to the largest singular value will be ignored. The default + value is len(x)*eps, where eps is the relative precision of the float + type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is + False (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is also + returned. Returns ------- - coefficients, [residuals, rank, singular_values, rcond] : variable - When full=False, only the coefficients are returned, running down the - appropriate colume when y is a 2D array. When full=True, the rank of the - scaled Vandermonde matrix, it's effective rank in light of the rcond - value, its singular values, and the specified value of rcond are also - returned. + p : ndarray, shape (M,) or (M, K) + Polynomial coefficients, highest power first. + If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``. 
+ + residuals, rank, singular_values, rcond : present only if `full` = True + Residuals of the least-squares fit, the effective rank of the scaled + Vandermonde coefficient matrix, its singular values, and the specified + value of `rcond`. For more details, see `linalg.lstsq`. Warns ----- - RankWarning : if rank is reduced and not full output - The warnings can be turned off by: - >>> import warnings - >>> warnings.simplefilter('ignore',np.RankWarning) + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- - polyval : computes polynomial values. + polyval : Computes polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- - If X is a the Vandermonde Matrix computed from x (see - http://mathworld.wolfram.com/VandermondeMatrix.html), then the - polynomial least squares solution is given by the 'p' in - - X*p = y - - where X.shape is a matrix of dimensions (len(x), deg + 1), p is a vector of - dimensions (deg + 1, 1), and y is a vector of dimensions (len(x), 1). - - This equation can be solved as - - p = (XT*X)^-1 * XT * y - - where XT is the transpose of X and -1 denotes the inverse. However, this - method is susceptible to rounding errors and generally the singular value - decomposition of the matrix X is preferred and that is what is done here. - The singular value method takes a paramenter, 'rcond', which sets a limit on - the relative size of the smallest singular value to be used in solving the - equation. This may result in lowering the rank of the Vandermonde matrix, in - which case a RankWarning is issued. If polyfit issues a RankWarning, try a - fit of lower degree or replace x by x - x.mean(), both of which will - generally improve the condition number. The routine already normalizes the - vector x by its maximum absolute value to help in this regard. The rcond - parameter can be set to a value smaller than its default, but the resulting - fit may be spurious. The current default value of rcond is len(x)*eps, where - eps is the relative precision of the floating type being used, generally - around 1e-7 and 2e-16 for IEEE single and double precision respectively. - This value of rcond is fairly conservative but works pretty well when x - - x.mean() is used in place of x. - - - DISCLAIMER: Power series fits are full of pitfalls for the unwary once the - degree of the fit becomes large or the interval of sample points is badly - centered. The problem is that the powers x**n are generally a poor basis for - the polynomial functions on the sample interval, resulting in a Vandermonde - matrix is ill conditioned and coefficients sensitive to rounding erros. The - computation of the polynomial values will also sensitive to rounding errors. - Consequently, the quality of the polynomial fit should be checked against - the data whenever the condition number is large. The quality of polynomial - fits *can not* be taken for granted. If all you want to do is draw a smooth - curve through the y values and polyfit is not doing the job, try centering - the sample range or look into scipy.interpolate, which includes some nice - spline fitting functions that may be of use. 
- - For more info, see - http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html, - but note that the k's and n's in the superscripts and subscripts - on that page. The linear algebra is correct, however. + The solution minimizes the squared error + + .. math :: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0] + x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1] + ... + x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `RankWarning` when the least-squares fit is badly + conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + http://en.wikipedia.org/wiki/Curve_fitting + .. [2] Wikipedia, "Polynomial interpolation", + http://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 + >>> p(3.5) + -0.34732142857143039 + >>> p(10) + 22.579365079365115 + + High-order polynomials may oscillate wildly: + + >>> p30 = np.poly1d(np.polyfit(x, y, 30)) + /... RankWarning: Polyfit may be poorly conditioned... + >>> p30(4) + -0.80000000000000204 + >>> p30(5) + -0.99999999999999445 + >>> p30(4.5) + -0.10547061179440398 + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + >>> plt.show() """ order = int(deg) + 1 @@ -330,36 +521,48 @@ def polyfit(x, y, deg, rcond=None, full=False): def polyval(p, x): - """Evaluate the polynomial p at x. + """ + Evaluate the polynomial p at x. If p is of length N, this function returns the value: p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1] - If x is a sequence then p(x) will be returned for all elements of x. If x is - another polynomial then the composite polynomial p(x) will be returned. + If x is a sequence then p(x) will be returned for all elements of x. + If x is another polynomial then the composite polynomial p(x) will + be returned. Parameters ---------- p : {array_like, poly1d} - 1D array of polynomial coefficients from highest degree to zero or an - instance of poly1d. + 1D array of polynomial coefficients from highest degree to zero or an + instance of poly1d. x : {array_like, poly1d} - A number, a 1D array of numbers, or an instance of poly1d. + A number, a 1D array of numbers, or an instance of poly1d. 
Returns ------- values : {array, poly1d} - If either p or x is an instance of poly1d, then an instance of poly1d is - returned, otherwise a 1D array is returned. In the case where x is a - poly1d, the result is the composition of the two polynomials, i.e., - substitution is used. + If either p or x is an instance of poly1d, then an instance of poly1d + is returned, otherwise a 1D array is returned. In the case where x is + a poly1d, the result is the composition of the two polynomials, i.e., + substitution is used. + + See Also + -------- + poly1d: A polynomial class. Notes ----- - Horners method is used to evaluate the polynomial. Even so, for polynomial - if high degree the values may be inaccurate due to rounding errors. Use - carefully. + Horner's method is used to evaluate the polynomial. Even so, for + polynomials of high degree the values may be inaccurate due to + rounding errors. Use carefully. + + + Examples + -------- + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 """ p = NX.asarray(p) @@ -473,24 +676,90 @@ def _raise_power(astr, wrap=70): class poly1d(object): - """A one-dimensional polynomial class. + """ + A one-dimensional polynomial class. + + Parameters + ---------- + c_or_r : array_like + Polynomial coefficients, in decreasing powers. E.g., + ``(1, 2, 3)`` implies :math:`x^2 + 2x + 3`. If `r` is set + to True, these coefficients specify the polynomial roots + (values where the polynomial evaluate to 0) instead. + r : bool, optional + If True, `c_or_r` gives the polynomial roots. Default is False. + + Examples + -------- + Construct the polynomial :math:`x^2 + 2x + 3`: + + >>> p = np.poly1d([1, 2, 3]) + >>> print np.poly1d(p) + 2 + 1 x + 2 x + 3 + + Evaluate the polynomial: + + >>> p(0.5) + 4.25 + + Find the roots: + + >>> p.r + array([-1.+1.41421356j, -1.-1.41421356j]) + + Show the coefficients: + + >>> p.c + array([1, 2, 3]) + + Display the order (the leading zero-coefficients are removed): + + >>> p.order + 2 + + Show the coefficient of the k-th power in the polynomial + (which is equivalent to ``p.c[-(i+1)]``): + + >>> p[1] + 2 + + Polynomials can be added, substracted, multplied and divided + (returns quotient and remainder): + + >>> p * p + poly1d([ 1, 4, 10, 12, 9]) + + >>> (p**3 + 4) / p + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4])) - p = poly1d([1,2,3]) constructs the polynomial x**2 + 2 x + 3 + ``asarray(p)`` gives the coefficient array, so polynomials can be + used in all functions that accept arrays: - p(0.5) evaluates the polynomial at the location - p.r is a list of roots - p.c is the coefficient array [1,2,3] - p.order is the polynomial order (after leading zeros in p.c are removed) - p[k] is the coefficient on the kth power of x (backwards from - sequencing the coefficient array. + >>> p**2 # square of polynomial + poly1d([ 1, 4, 10, 12, 9]) - polynomials can be added, substracted, multplied and divided (returns - quotient and remainder). - asarray(p) will also give the coefficient array, so polynomials can - be used in all functions that accept arrays. 
+ >>> np.square(p) # square of individual coefficients + array([1, 4, 9]) + + The variable used in the string representation of `p` can be modified, + using the `variable` parameter: + + >>> p = np.poly1d([1,2,3], variable='z') + >>> print p + 2 + 1 z + 2 z + 3 + + Construct a polynomial from its roots: + + >>> np.poly1d([1, 2], True) + poly1d([ 1, -3, 2]) + + This is the same polynomial as obtained by: + + >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) + poly1d([ 1, -3, 2]) - p = poly1d([1,2,3], variable='lambda') will use lambda in the - string representation of p. """ coeffs = None order = None @@ -686,13 +955,28 @@ class poly1d(object): return iter(self.coeffs) def integ(self, m=1, k=0): - """Return the mth analytical integral of this polynomial. - See the documentation for polyint. + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + """ return poly1d(polyint(self.coeffs, m=m, k=k)) def deriv(self, m=1): - """Return the mth derivative of this polynomial. + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + """ return poly1d(polyder(self.coeffs, m=m)) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index afdb879e4..8f6073bd5 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -9,9 +9,47 @@ from numpy.core.numeric import asarray, zeros, newaxis, outer, \ from numpy.core.fromnumeric import product, reshape def apply_along_axis(func1d,axis,arr,*args): - """ Execute func1d(arr[i],*args) where func1d takes 1-D arrays - and arr is an N-d array. i varies so as to apply the function - along the given axis for each 1-d subarray in arr. + """ + Apply function to 1-D slices along the given axis. + + Execute `func1d(arr[i],*args)` where `func1d` takes 1-D arrays, `arr` is + the input array, and `i` is an integer that varies in order to apply the + function along the given axis for each 1-D subarray in `arr`. + + Parameters + ---------- + func1d : function + This function should be able to take 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `func1d` is applied. + arr : ndarray + Input array. + args : any + Additional arguments to `func1d`. + + Returns + ------- + outarr : ndarray + The output array. The shape of `outarr` depends on the return + value of `func1d`. If it returns arrays with the same shape as the + input arrays it receives, `outarr` has the same shape as `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + """ arr = asarray(arr) nd = arr.ndim @@ -71,12 +109,58 @@ def apply_along_axis(func1d,axis,arr,*args): def apply_over_axes(func, a, axes): - """Apply a function repeatedly over multiple axes, keeping the same shape - for the resulting array. + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, with `axis` the first element + of `axes`. The result `res` of the function call has to have + the same or one less dimension(s) as `a`. 
If `res` has one less dimension + than `a`, a dimension is then inserted before `axis`. + The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function should take two arguments, `func(a, axis)`. + arr : ndarray + Input array. + axes : array_like + Axes over which `func` has to be applied, the elements should be + integers. + + Returns + ------- + val : ndarray + The output array. The number of dimensions is the same as `a`, + the shape can be different, this depends on whether `func` changes + the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Examples + -------- + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) - func is called as res = func(a, axis). The result is assumed - to be either the same shape as a or have one less dimension. - This call is repeated for each axis in the axes sequence. """ val = asarray(a) N = a.ndim @@ -98,7 +182,55 @@ def apply_over_axes(func, a, axes): return val def expand_dims(a, axis): - """Expand the shape of a by including newaxis before given axis. + """ + Expand the shape of an array. + + Insert a new axis, corresponding to a given position in the array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int + Position (amongst axes) where new axis is to be inserted. + + Returns + ------- + res : ndarray + Output array. The number of dimensions is one greater than that of + the input array. + + See Also + -------- + doc.indexing, atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> x = np.array([1,2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis] + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + Note that some examples may use ``None`` instead of ``np.newaxis``. These + are the same objects: + + >>> np.newaxis is None + True + """ a = asarray(a) shape = a.shape @@ -108,16 +240,43 @@ def expand_dims(a, axis): def atleast_1d(*arys): - """ Force a sequence of arrays to each be at least 1D. + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + array1, array2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or sequence of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> np.atleast_1d(1.0) + array([ 1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + [array([1]), array([3, 4])] - Description: - Force an array to be at least 1D. If an array is 0D, the - array is converted to a single row of values. Otherwise, - the array is unaltered. 
- Arguments: - *arys -- arrays to be converted to 1 or more dimensional array. - Returns: - input array converted to at least 1D array. """ res = [] for ary in arys: @@ -128,16 +287,41 @@ def atleast_1d(*arys): return res def atleast_2d(*arys): - """ Force a sequence of arrays to each be at least 2D. + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + array1, array2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 2``. + Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> numpy.atleast_2d(3.0) + array([[ 3.]]) + + >>> x = numpy.arange(3.0) + >>> numpy.atleast_2d(x) + array([[ 0., 1., 2.]]) + >>> numpy.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + [array([[1]]), array([[1, 2]]), array([[1, 2]])] - Description: - Force an array to each be at least 2D. If the array - is 0D or 1D, the array is converted to a single - row of values. Otherwise, the array is unaltered. - Arguments: - arys -- arrays to be converted to 2 or more dimensional array. - Returns: - input array converted to at least 2D array. """ res = [] for ary in arys: @@ -148,19 +332,54 @@ def atleast_2d(*arys): return res def atleast_3d(*arys): - """ Force a sequence of arrays to each be at least 3D. - - Description: - Force an array each be at least 3D. If the array is 0D or 1D, - the array is converted to a single 1xNx1 array of values where - N is the orginal length of the array. If the array is 2D, the - array is converted to a single MxNx1 array of values where MxN - is the orginal shape of the array. Otherwise, the array is - unaltered. - Arguments: - arys -- arrays to be converted to 3 or more dimensional array. - Returns: - input array converted to at least 3D array. + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + array1, array2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 3``. + Copies are avoided where possible, and views with three or more + dimensions are returned. For example, a one-dimensional array of + shape ``N`` becomes a view of shape ``(1, N, 1)``. An ``(M, N)`` + array becomes a view of shape ``(N, M, 1)``. + + See Also + -------- + numpy.atleast_1d, numpy.atleast_2d + + Examples + -------- + >>> numpy.atleast_3d(3.0) + array([[[ 3.]]]) + + >>> x = numpy.arange(3.0) + >>> numpy.atleast_3d(x).shape + (1, 3, 1) + + >>> x = numpy.arange(12.0).reshape(4,3) + >>> numpy.atleast_3d(x).shape + (4, 3, 1) + >>> numpy.atleast_3d(x).base is x + True + + >>> for arr in np.atleast_3d(1, [1, 2], [[1, 2]]): print arr, "\\n" + ... + [[[1]]] + + [[[1] + [2]]] + + [[[1] + [2]]] + """ res = [] for ary in arys: @@ -181,57 +400,78 @@ def atleast_3d(*arys): def vstack(tup): - """ Stack arrays in sequence vertically (row wise) - - Description: - Take a sequence of arrays and stack them vertically - to make a single array. All arrays in the sequence - must have the same shape along all but the first axis. - vstack will rebuild arrays divided by vsplit. 
- Arguments: - tup -- sequence of arrays. All arrays must have the same - shape. - Examples: - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.vstack((a,b)) - array([[1, 2, 3], - [2, 3, 4]]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.vstack((a,b)) - array([[1], - [2], - [3], - [2], - [3], - [4]]) + """ + Stack arrays vertically. + + `vstack` can be used to rebuild arrays divided by `vsplit`. + + Parameters + ---------- + tup : sequence of arrays + Tuple containing arrays to be stacked. The arrays must have the same + shape along all but the first axis. + + See Also + -------- + array_split : Split an array into a list of multiple sub-arrays of + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + vsplit : Split array into a list of multiple sub-arrays vertically. + dsplit : Split array into a list of multiple sub-arrays along the 3rd axis + (depth). + concatenate : Join arrays together. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> b = np.array([2, 3, 4]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [2, 3, 4]]) + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[2], [3], [4]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [2], + [3], + [4]]) """ return _nx.concatenate(map(atleast_2d,tup),0) def hstack(tup): - """ Stack arrays in sequence horizontally (column wise) - - Description: - Take a sequence of arrays and stack them horizontally - to make a single array. All arrays in the sequence - must have the same shape along all but the second axis. - hstack will rebuild arrays divided by hsplit. - Arguments: - tup -- sequence of arrays. All arrays must have the same - shape. - Examples: - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.hstack((a,b)) - array([1, 2, 3, 2, 3, 4]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.hstack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + """ + Stack arrays in sequence horizontally (column wise) + + Take a sequence of arrays and stack them horizontally to make + a single array. hstack will rebuild arrays divided by hsplit. + + Parameters + ---------- + tup : sequence of ndarrays + All arrays must have the same shape along all but the second axis. + + Returns + ------- + stacked : ndarray + Ndarray formed by stacking the given arrays. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.hstack((a,b)) + array([1, 2, 3, 2, 3, 4]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.hstack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) """ return _nx.concatenate(map(atleast_1d,tup),1) @@ -239,25 +479,27 @@ def hstack(tup): row_stack = vstack def column_stack(tup): - """ Stack 1D arrays as columns into a 2D array - - Description: - Take a sequence of 1D arrays and stack them as columns - to make a single 2D array. All arrays in the sequence - must have the same first dimension. 2D arrays are - stacked as-is, just like with hstack. 1D arrays are turned - into 2D columns first. - - Arguments: - tup -- sequence of 1D or 2D arrays. All arrays must have the same - first dimension. 
- Examples: - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + """ + Stack 1-D arrays as columns into a 2-D array + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with hstack. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) """ arrays = [] @@ -269,32 +511,35 @@ def column_stack(tup): return _nx.concatenate(arrays,1) def dstack(tup): - """ Stack arrays in sequence depth wise (along third dimension) - - Description: - Take a sequence of arrays and stack them along the third axis. - All arrays in the sequence must have the same shape along all - but the third axis. This is a simple way to stack 2D arrays - (images) into a single 3D array for processing. - dstack will rebuild arrays divided by dsplit. - Arguments: - tup -- sequence of arrays. All arrays must have the same - shape. - Examples: - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.dstack((a,b)) - array([[[1, 2]], - - [[2, 3]], - - [[3, 4]]]) + """ + Stack arrays in sequence depth wise (along third dimension) + + Take a sequence of arrays and stack them along the third axis. + This is a simple way to stack 2D arrays (images) into a single + 3D array for processing. dstack will rebuild arrays divided by dsplit. + + Parameters + ---------- + tup : sequence of arrays + Arrays to stack. All of them must have the same shape along all + but the third axis. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + + [[2, 3]], + + [[3, 4]]]) """ return _nx.concatenate(map(atleast_3d,tup),2) @@ -308,32 +553,23 @@ def _replace_zero_by_x_arrays(sub_arys): return sub_arys def array_split(ary,indices_or_sections,axis = 0): - """ Divide an array into a list of sub-arrays. - - Description: - Divide ary into a list of sub-arrays along the - specified axis. If indices_or_sections is an integer, - ary is divided into that many equally sized arrays. - If it is impossible to make an equal split, each of the - leading arrays in the list have one additional member. If - indices_or_sections is a list of sorted integers, its - entries define the indexes where ary is split. - - Arguments: - ary -- N-D array. - Array to be divided into sub-arrays. - indices_or_sections -- integer or 1D array. - If integer, defines the number of (close to) equal sized - sub-arrays. If it is a 1D array of sorted indices, it - defines the indexes at which ary is divided. Any empty - list results in a single sub-array equal to the original - array. - axis -- integer. default=0. - Specifies the axis along which to split ary. - Caveats: - Currently, the default for axis is 0. This - means a 2D array is divided into multiple groups - of rows. This seems like the appropriate default, + """ + Split an array into multiple sub-arrays of equal or near-equal size. + + Please refer to the `numpy.split` documentation. 
The only difference + between these functions is that `array_split` allows `indices_or_sections` + to be an integer that does *not* equally divide the axis. + + See Also + -------- + numpy.split : Split array into multiple sub-arrays. + + Examples + -------- + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] + """ try: Ntotal = ary.shape[axis] @@ -367,33 +603,70 @@ def array_split(ary,indices_or_sections,axis = 0): return sub_arys def split(ary,indices_or_sections,axis=0): - """ Divide an array into a list of sub-arrays. - - Description: - Divide ary into a list of sub-arrays along the - specified axis. If indices_or_sections is an integer, - ary is divided into that many equally sized arrays. - If it is impossible to make an equal split, an error is - raised. This is the only way this function differs from - the array_split() function. If indices_or_sections is a - list of sorted integers, its entries define the indexes - where ary is split. - - Arguments: - ary -- N-D array. - Array to be divided into sub-arrays. - indices_or_sections -- integer or 1D array. - If integer, defines the number of (close to) equal sized - sub-arrays. If it is a 1D array of sorted indices, it - defines the indexes at which ary is divided. Any empty - list results in a single sub-array equal to the original - array. - axis -- integer. default=0. - Specifies the axis along which to split ary. - Caveats: - Currently, the default for axis is 0. This - means a 2D array is divided into multiple groups - of rows. This seems like the appropriate default + """ + Split an array into multiple sub-arrays of equal size. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections: integer or 1D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis = 0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : integer, optional + The axis along which to split. Default is 0. + + Returns + ------- + sub-arrays : list + A list of sub-arrays. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join arrays together. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). 
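For an integer section count, the distinction drawn above reduces to an equal-division check placed in front of `array_split`; a minimal sketch (the helper name and error message are illustrative only, not the implementation below)::

    import numpy as np

    def split_sketch(ary, sections, axis=0):
        # Integer case only: the section count must divide the axis length
        # exactly, otherwise raise; array_split handles the actual slicing.
        if ary.shape[axis] % sections != 0:
            raise ValueError("array split does not result in an equal division")
        return np.array_split(ary, sections, axis=axis)

    split_sketch(np.arange(9.0), 3)    # three equal pieces, as np.split gives
    # split_sketch(np.arange(8.0), 3) would raise, unlike np.array_split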
+ + Examples + -------- + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + + [array([ 0., 1., 2.]), + array([ 3., 4.]), + array([ 5.]), + array([ 6., 7.]), + array([], dtype=float64)] + """ try: len(indices_or_sections) except TypeError: @@ -405,38 +678,41 @@ def split(ary,indices_or_sections,axis=0): return res def hsplit(ary,indices_or_sections): - """ Split ary into multiple columns of sub-arrays - - Description: - Split a single array into multiple sub arrays. The array is - divided into groups of columns. If indices_or_sections is - an integer, ary is divided into that many equally sized sub arrays. - If it is impossible to make the sub-arrays equally sized, the - operation throws a ValueError exception. See array_split and - split for other options on indices_or_sections. - Arguments: - ary -- N-D array. - Array to be divided into sub-arrays. - indices_or_sections -- integer or 1D array. - If integer, defines the number of (close to) equal sized - sub-arrays. If it is a 1D array of sorted indices, it - defines the indexes at which ary is divided. Any empty - list results in a single sub-array equal to the original - array. - Returns: - sequence of sub-arrays. The returned arrays have the same - number of dimensions as the input array. - Related: - hstack, split, array_split, vsplit, dsplit. - Examples: - >>> a= np.array((1,2,3,4)) - >>> np.hsplit(a,2) - [array([1, 2]), array([3, 4])] - >>> a = np.array([[1,2,3,4],[1,2,3,4]]) - >>> np.hsplit(a,2) - [array([[1, 2], - [1, 2]]), array([[3, 4], - [3, 4]])] + """ + Split array into multiple sub-arrays horizontally. + + Please refer to the `numpy.split` documentation. `hsplit` is + equivalent to `numpy.split` with ``axis = 1``. + + See Also + -------- + split : Split array into multiple sub-arrays. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> np.hsplit(x, 2) + + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [ 12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [ 10., 11.], + [ 14., 15.]])] + + >>> np.hsplit(x, array([3, 6])) + + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [ 12., 13., 14.]]), + array([[ 3.], + [ 7.], + [ 11.], + [ 15.]]), + array([], dtype=float64)] """ if len(_nx.shape(ary)) == 0: @@ -447,41 +723,15 @@ def hsplit(ary,indices_or_sections): return split(ary,indices_or_sections,0) def vsplit(ary,indices_or_sections): - """ Split ary into multiple rows of sub-arrays - - Description: - Split a single array into multiple sub arrays. The array is - divided into groups of rows. If indices_or_sections is - an integer, ary is divided into that many equally sized sub arrays. - If it is impossible to make the sub-arrays equally sized, the - operation throws a ValueError exception. See array_split and - split for other options on indices_or_sections. - Arguments: - ary -- N-D array. - Array to be divided into sub-arrays. - indices_or_sections -- integer or 1D array. - If integer, defines the number of (close to) equal sized - sub-arrays. If it is a 1D array of sorted indices, it - defines the indexes at which ary is divided. Any empty - list results in a single sub-array equal to the original - array. - Returns: - sequence of sub-arrays. The returned arrays have the same - number of dimensions as the input array. - Caveats: - How should we handle 1D arrays here? I am currently raising - an error when I encounter them. Any better approach? 
- - Should we reduce the returned array to their minium dimensions - by getting rid of any dimensions that are 1? - Related: - vstack, split, array_split, hsplit, dsplit. - Examples: - import numpy - >>> a = np.array([[1,2,3,4], - ... [1,2,3,4]]) - >>> np.vsplit(a,2) - [array([[1, 2, 3, 4]]), array([[1, 2, 3, 4]])] + """ + Split array into multiple sub-arrays vertically. + + Please refer to the `numpy.split` documentation. + + See Also + -------- + numpy.split : The default behaviour of this function implements + `vsplit`. """ if len(_nx.shape(ary)) < 2: @@ -489,37 +739,79 @@ def vsplit(ary,indices_or_sections): return split(ary,indices_or_sections,0) def dsplit(ary,indices_or_sections): - """ Split ary into multiple sub-arrays along the 3rd axis (depth) - - Description: - Split a single array into multiple sub arrays. The array is - divided into groups along the 3rd axis. If indices_or_sections is - an integer, ary is divided into that many equally sized sub arrays. - If it is impossible to make the sub-arrays equally sized, the - operation throws a ValueError exception. See array_split and - split for other options on indices_or_sections. - Arguments: - ary -- N-D array. - Array to be divided into sub-arrays. - indices_or_sections -- integer or 1D array. - If integer, defines the number of (close to) equal sized - sub-arrays. If it is a 1D array of sorted indices, it - defines the indexes at which ary is divided. Any empty - list results in a single sub-array equal to the original - array. - Returns: - sequence of sub-arrays. The returned arrays have the same - number of dimensions as the input array. - Caveats: - See vsplit caveats. - Related: - dstack, split, array_split, hsplit, vsplit. - Examples: - >>> a = np.array([[[1,2,3,4],[1,2,3,4]]]) - >>> np.dsplit(a,2) - [array([[[1, 2], - [1, 2]]]), array([[[3, 4], - [3, 4]]])] + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Parameters + ---------- + ary : ndarray + An array, with at least 3 dimensions, to be divided into sub-arrays + depth-wise, or along the third axis. + indices_or_sections: integer or 1D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If an equal split is not possible, + a ValueError is raised. + + if `indices_or_sections` is a 1D array of sorted integers representing + indices along `axis`, the array will be divided such that each index + marks the start of each sub-array. If an index exceeds the dimension of + the array along `axis`, and empty sub-array is returned for that index. + axis : integer, optional + the axis along which to split. Default is 0. + + Returns + ------- + sub-arrays : list + A list of sub-arrays. + + See Also + -------- + array_split : Split an array into a list of multiple sub-arrays + of near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into a list of multiple sub-arrays horizontally + vsplit : Split array into a list of multiple sub-arrays vertically + concatenate : Join arrays together. + hstack : Stack arrays in sequence horizontally (column wise) + vstack : Stack arrays in sequence vertically (row wise) + dstack : Stack arrays in sequence depth wise (along third dimension) + + Notes + ----- + `dsplit` requires that sub-arrays are of equal shape, whereas + `array_split` allows for sub-arrays to have nearly-equal shape. + Equivalent to `split` with `axis` = 2. 
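The equivalence stated in the Notes can be checked directly; a minimal sketch, using the same array as the Examples that follow::

    import numpy as np

    x = np.arange(16.0).reshape(2, 2, 4)
    # dsplit along the third axis should agree piecewise with
    # split(..., axis=2), as stated above.
    for a, b in zip(np.dsplit(x, 2), np.split(x, 2, axis=2)):
        assert np.array_equal(a, b)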
+ + Examples + -------- + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> np.dsplit(x, 2) + + [array([[[ 0., 1.], + [ 4., 5.]], + + [[ 8., 9.], + [ 12., 13.]]]), + array([[[ 2., 3.], + [ 6., 7.]], + + [[ 10., 11.], + [ 14., 15.]]])] + + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> np.dsplit(x, array([3, 6])) + + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + + [[ 8., 9., 10.], + [ 12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + + [[ 11.], + [ 15.]]]), + array([], dtype=float64)] """ if len(_nx.shape(ary)) < 3: @@ -540,12 +832,74 @@ def get_array_wrap(*args): return None def kron(a,b): - """kronecker product of a and b + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + + outer : The outer product + + Notes + ----- + + The function assumes that the number of dimenensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. + If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, + the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. + The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[ 1., 1., 0., 0.], + [ 1., 1., 0., 0.], + [ 0., 0., 1., 1.], + [ 0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> C[K] == A[I]*B[J] + True - Kronecker product of two arrays is block array - [[ a[ 0 ,0]*b, a[ 0 ,1]*b, ... , a[ 0 ,n-1]*b ], - [ ... ... ], - [ a[m-1,0]*b, a[m-1,1]*b, ... , a[m-1,n-1]*b ]] """ wrapper = get_array_wrap(a, b) b = asanyarray(b) @@ -576,37 +930,61 @@ def kron(a,b): def tile(A, reps): - """Repeat an array the number of times given in the integer tuple, reps. - - If reps has length d, the result will have dimension of max(d, A.ndim). - If reps is scalar it is treated as a 1-tuple. - - If A.ndim < d, A is promoted to be d-dimensional by prepending new axes. - So a shape (3,) array is promoted to (1,3) for 2-D replication, - or shape (1,1,3) for 3-D replication. - If this is not the desired behavior, promote A to d-dimensions manually - before calling this function. - - If d < A.ndim, tup is promoted to A.ndim by pre-pending 1's to it. Thus - for an A.shape of (2,3,4,5), a tup of (2,2) is treated as (1,1,2,2) - - - Examples: - >>> a = np.array([0,1,2]) - >>> np.tile(a,2) + """ + Construct an array by repeating A the number of times given by reps. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The output array. 
+ + See Also + -------- + repeat + + Notes + ----- + If `reps` has length d, the result will have dimension of max(d, `A`.ndim). + + If `A`.ndim < d, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1,3) for 2-D replication, + or shape (1,1,3) for 3-D replication. If this is not the desired behavior, + promote `A` to d-dimensions manually before calling this function. + + If `A`.ndim > d, `reps` is promoted to `A`.ndim by pre-pending 1's to it. + Thus for an `A` of shape (2,3,4,5), a `reps` of (2,2) is treated as + (1,1,2,2). + + Examples + -------- + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) array([0, 1, 2, 0, 1, 2]) - >>> np.tile(a,(1,2)) - array([[0, 1, 2, 0, 1, 2]]) - >>> np.tile(a,(2,2)) + >>> np.tile(a, (2, 2)) array([[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) - >>> np.tile(a,(2,1,2)) + >>> np.tile(a, (2, 1, 2)) array([[[0, 1, 2, 0, 1, 2]], [[0, 1, 2, 0, 1, 2]]]) - See Also: - repeat + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + """ try: tup = tuple(reps) diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index 25987362f..8006b25d1 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -23,11 +23,13 @@ def as_strided(x, shape=None, strides=None): return np.asarray(DummyArray(interface, base=x)) def broadcast_arrays(*args): - """ Broadcast any number of arrays against each other. + """ + Broadcast any number of arrays against each other. Parameters ---------- - *args : arrays + `*args` : arrays + The arrays to broadcast. Returns ------- diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index ab1e5fcf0..d779009e7 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -9,8 +9,49 @@ from numpy.core.numeric import asanyarray, equal, subtract, arange, \ zeros, arange, greater_equal, multiply, ones, asarray def fliplr(m): - """ returns an array m with the rows preserved and columns flipped - in the left/right direction. Works on the first two dimensions of m. + """ + Left-right flip. + + Flip the entries in each row in the left/right direction. + Columns are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to A[::-1,...]. Does not require the array to be + two-dimensional. + + Examples + -------- + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[ 1., 0., 0.], + [ 0., 2., 0.], + [ 0., 0., 3.]]) + >>> np.fliplr(A) + array([[ 0., 0., 1.], + [ 0., 2., 0.], + [ 3., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(numpy.fliplr(A)==A[:,::-1,...]) + True + """ m = asanyarray(m) if m.ndim < 2: @@ -18,8 +59,47 @@ def fliplr(m): return m[:, ::-1] def flipud(m): - """ returns an array with the columns preserved and rows flipped in - the up/down direction. Works on the first dimension of m. + """ + Up-down flip. + + Flip the entries in each column in the up/down direction. + Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. 
Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + Notes + ----- + Equivalent to ``A[::-1,...]``. + Does not require the array to be two-dimensional. + + Examples + -------- + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[ 1., 0., 0.], + [ 0., 2., 0.], + [ 0., 0., 3.]]) + >>> np.flipud(A) + array([[ 0., 0., 3.], + [ 0., 2., 0.], + [ 1., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.flipud(A)==A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + """ m = asanyarray(m) if m.ndim < 1: @@ -27,9 +107,42 @@ def flipud(m): return m[::-1,...] def rot90(m, k=1): - """ returns the array found by rotating m by k*90 - degrees in the counterclockwise direction. Works on the first two - dimensions of m. + """ + Rotate an array by 90 degrees in the counter-clockwise direction. + + The first two dimensions are rotated; therefore, the array must be at + least 2-D. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + + Returns + ------- + y : ndarray + Rotated array. + + See Also + -------- + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Examples + -------- + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + """ m = asanyarray(m) if m.ndim < 2: @@ -41,8 +154,41 @@ def rot90(m, k=1): else: return fliplr(m.swapaxes(0,1)) # k==3 def eye(N, M=None, k=0, dtype=float): - """ eye returns a N-by-M 2-d array where the k-th diagonal is all ones, - and everything else is zeros. + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, a positive value + refers to an upper diagonal and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned array. + + Returns + ------- + I : ndarray (N,M) + An array where all elements are equal to zero, except for the k'th + diagonal, whose values are equal to one. + + See Also + -------- + diag : Return a diagonal 2-D array using a 1-D array specified by the user. + + Examples + -------- + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[ 0., 1., 0.], + [ 0., 0., 1.], + [ 0., 0., 0.]]) + """ if M is None: M = N m = equal(subtract.outer(arange(N), arange(M)),-k) @@ -51,9 +197,34 @@ def eye(N, M=None, k=0, dtype=float): return m def diag(v, k=0): - """ returns a copy of the the k-th diagonal if v is a 2-d array - or returns a 2-d array with v as the k-th diagonal if v is a - 1-d array. + """ + Extract a diagonal or construct a diagonal array. + + Parameters + ---------- + v : array_like + If `v` is a 2-dimensional array, return a copy of + its `k`-th diagonal. If `v` is a 1-dimensional array, + return a 2-dimensional array with `v` on the `k`-th diagonal. + k : int, optional + Diagonal in question. The defaults is 0. + + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + """ v = asarray(v) s = v.shape @@ -83,21 +254,30 @@ def diag(v, k=0): raise ValueError, "Input must be 1- or 2-d." 
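As the See Also entry of `eye` suggests, a ones-filled off-diagonal can equally be produced with `diag`, and `diag` applied twice round-trips a diagonal matrix; a short illustrative check::

    import numpy as np

    # eye's k-th diagonal of ones is what diag builds from a vector of ones
    # placed on that same diagonal.
    assert np.array_equal(np.eye(3, k=1), np.diag(np.ones(2), 1))

    # diag extracts the diagonal of a 2-D array and rebuilds a 2-D array from
    # 1-D input, so applying it twice recovers a diagonal matrix.
    d = np.diag(np.array([1., 2., 3.]))
    assert np.array_equal(np.diag(np.diag(d)), d)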
def diagflat(v,k=0): - """Return a 2D array whose k'th diagonal is a flattened v and all other - elements are zero. + """ + Create a 2-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set. The default is 0. Examples -------- - >>> np.diagflat([[1,2],[3,4]]) - array([[1, 0, 0, 0], - [0, 2, 0, 0], - [0, 0, 3, 0], - [0, 0, 0, 4]]) + >>> np.diagflat([[1,2],[3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) - >>> np.diagflat([1,2], 1) - array([[0, 1, 0], - [0, 0, 2], - [0, 0, 0]]) """ try: wrap = v.__array_wrap__ @@ -119,24 +299,102 @@ def diagflat(v,k=0): return wrap(res) def tri(N, M=None, k=0, dtype=float): - """ returns a N-by-M array where all the diagonals starting from - lower left corner up to the k-th are all ones. + """ + Construct an array filled with ones at and below the given diagonal. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken to equal to `N`. + k : int, optional + The sub-diagonal below which the array is filled. + ``k = 0`` is the main diagonal, while ``k < 0`` is below it, + and ``k > 0`` is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is `float`. + + Returns + ------- + T : (N,M) ndarray + Array with a lower triangle filled with ones, in other words + ``T[i,j] == 1`` for ``i <= j + k``. + + Examples + -------- + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[ 0., 0., 0., 0., 0.], + [ 1., 0., 0., 0., 0.], + [ 1., 1., 0., 0., 0.]]) + """ if M is None: M = N m = greater_equal(subtract.outer(arange(N), arange(M)),-k) return m.astype(dtype) def tril(m, k=0): - """ returns the elements on and below the k-th diagonal of m. k=0 is the - main diagonal, k > 0 is above and k < 0 is below the main diagonal. + """ + Lower triangular. + + Return a copy of an array with elements above the k-th diagonal zeroed. + + Parameters + ---------- + m : array-like, shape (M, N) + Input array. + k : int + Diagonal above which to zero elements. + `k = 0` is the main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + L : ndarray, shape (M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu + + Examples + -------- + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + """ m = asanyarray(m) out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m) return out def triu(m, k=0): - """ returns the elements on and above the k-th diagonal of m. k=0 is the - main diagonal, k > 0 is above and k < 0 is below the main diagonal. + """ + Upper triangular. + + Construct a copy of a matrix with elements below the k-th diagonal zeroed. + + Please refer to the documentation for `tril`. 
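`triu` and `tril` are complementary: with offsets `k` and `k - 1` they select disjoint parts of a matrix that sum back to the original. A minimal sketch::

    import numpy as np

    m = np.arange(1, 13).reshape(4, 3)
    k = -1
    # Entries on or above diagonal k are kept by triu(m, k); entries strictly
    # below it are kept by tril(m, k - 1). Together they rebuild m.
    assert np.array_equal(np.triu(m, k) + np.tril(m, k - 1), m)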
+ + See Also + -------- + tril + + Examples + -------- + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + """ m = asanyarray(m) out = multiply((1-tri(m.shape[0], m.shape[1], k-1, int)),m) @@ -145,10 +403,40 @@ def triu(m, k=0): # borrowed from John Hunter and matplotlib def vander(x, N=None): """ - Generate the Vandermonde matrix of vector x. + Generate a Van der Monde matrix. - The i-th column of X is the the (N-i)-1-th power of x. N is the - maximum power to compute; if N is None it defaults to len(x). + The columns of the output matrix are decreasing powers of the input + vector. Specifically, the i-th output column is the input vector to + the power of ``N - i - 1``. + + Parameters + ---------- + x : array_like + Input array. + N : int, optional + Order of (number of columns in) the output. + + Returns + ------- + out : ndarray + Van der Monde matrix of order `N`. The first column is ``x^(N-1)``, + the second ``x^(N-2)`` and so forth. + + Examples + -------- + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) """ x = asarray(x) @@ -160,30 +448,77 @@ def vander(x, N=None): def histogram2d(x,y, bins=10, range=None, normed=False, weights=None): - """histogram2d(x,y, bins=10, range=None, normed=False) -> H, xedges, yedges - - Compute the 2D histogram from samples x,y. - - :Parameters: - - `x,y` : Sample arrays (1D). - - `bins` : Number of bins -or- [nbin x, nbin y] -or- - [bin edges] -or- [x bin edges, y bin edges]. - - `range` : A sequence of lower and upper bin edges (default: [min, max]). - - `normed` : Boolean, if False, return the number of samples in each bin, - if True, returns the density. - - `weights` : An array of weights. The weights are normed only if normed - is True. Should weights.sum() not equal N, the total bin count \ - will not be equal to the number of samples. - - :Return: - - `hist` : Histogram array. - - `xedges, yedges` : Arrays defining the bin edges. - - Example: - >>> x = np.random.randn(100,2) - >>> hist2d, xedges, yedges = np.lib.histogram2d(x, bins = (6, 7)) - - :SeeAlso: histogramdd + """ + Compute the bidimensional histogram of two data samples. + + Parameters + ---------- + x : array-like (N,) + A sequence of values to be histogrammed along the first dimension. + y : array-like (N,) + A sequence of values to be histogrammed along the second dimension. + bins : int or [int, int] or array-like or [array, array], optional + The bin specification: + + * the number of bins for the two dimensions (nx=ny=bins), + * the number of bins in each dimension (nx, ny = bins), + * the bin edges for the two dimensions (x_edges=y_edges=bins), + * the bin edges in each dimension (x_edges, y_edges = bins). + + range : array-like, (2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be + considered outliers and not tallied in the histogram. + normed : boolean, optional + If False, returns the number of samples in each bin. If True, returns + the bin density, ie, the bin count divided by the bin area. + weights : array-like (N,), optional + An array of values `w_i` weighing each sample `(x_i, y_i)`. Weights are + normalized to 1 if normed is True. 
If normed is False, the values of the + returned histogram are equal to the sum of the weights belonging to the + samples falling into each bin. + + Returns + ------- + H : array (nx, ny) + The bidimensional histogram of samples x and y. Values in x are + histogrammed along the first dimension and values in y are histogrammed + along the second dimension. + xedges : array (nx,) + The bin edges along the first dimension. + yedges : array (ny,) + The bin edges along the second dimension. + + See Also + -------- + histogram: 1D histogram + histogramdd: Multidimensional histogram + + Notes + ----- + When normed is True, then the returned histogram is the sample density, + defined such that: + + .. math:: + \\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1 + + where :math:`H` is the histogram array and :math:`\\Delta x_i \\Delta y_i` + the area of bin :math:`{i,j}`. + + Please note that the histogram does not follow the cartesian convention + where `x` values are on the abcissa and `y` values on the ordinate axis. + Rather, `x` is histogrammed along the first dimension of the array + (vertical), and `y` along the second dimension of the array (horizontal). + This ensures compatibility with `histogrammdd`. + + Examples + -------- + >>> x,y = np.random.randn(2,100) + >>> H, xedges, yedges = np.histogram2d(x, y, bins = (5, 8)) + >>> H.shape, xedges.shape, yedges.shape + ((5,8), (6,), (9,)) + """ from numpy import histogramdd diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 20817ab01..0a78986ac 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -49,16 +49,53 @@ def asfarray(a, dtype=_nx.float_): return asarray(a,dtype=dtype) def real(val): - """Return the real part of val. + """ + Return the real part of the elements of the array. + + Parameters + ---------- + val : {array_like, scalar} + Input array. + + Returns + ------- + out : ndarray + If `val` is real, the type of `val` is used for the output. If `val` + has complex elements, the returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> a = np.array([1+2j,3+4j,5+6j]) + >>> a.real + array([ 1., 3., 5.]) + >>> a.real = 9 + >>> a + array([ 9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9,8,7]) + >>> a + array([ 9.+2.j, 8.+4.j, 7.+6.j]) - Useful if val maybe a scalar or an array. """ return asanyarray(val).real def imag(val): - """Return the imaginary part of val. + """ + Return the imaginary part of array. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray, real or int + Real part of each element, same shape as `val`. - Useful if val maybe a scalar or an array. """ return asanyarray(val).imag @@ -75,10 +112,26 @@ def iscomplex(x): return +res # convet to array-scalar if needed def isreal(x): - """Return a boolean array where elements are True if that element - is real (has zero imaginary part) + """ + Returns a bool array where True if the corresponding input element is real. + + True if complex part is zero. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. + + Examples + -------- + >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) + >>> array([False, True, True, True, True, False], dtype=bool) - For scalars, return a boolean. 
""" return imag(x) == 0 @@ -105,12 +158,27 @@ def _getmaxmin(t): def nan_to_num(x): """ - Returns a copy of replacing NaN's with 0 and Infs with large numbers + Replace nan with zero and inf with large numbers. + + Parameters + ---------- + x : array_like + Input data. + + Returns + ------- + out : ndarray + Array with the same shape and dtype as `x`. Nan is replaced + by zero, and inf (-inf) is replaced by the largest (smallest) + floating point value that fits in the output dtype. + + Examples + -------- + >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) + >>> np.nan_to_num(x) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, + -1.28000000e+002, 1.28000000e+002]) - The following mappings are applied: - NaN -> 0 - Inf -> limits.double_max - -Inf -> limits.double_min """ try: t = x.dtype.type @@ -143,10 +211,36 @@ def nan_to_num(x): #----------------------------------------------------------------------------- def real_if_close(a,tol=100): - """If a is a complex array, return it as a real array if the imaginary - part is close enough to zero. + """ + If complex input returns a real array if complex parts are close to zero. + + "Close to zero" is defined as `tol` * (machine epsilon of the type for + `a`). + + Parameters + ---------- + a : {array_like, scalar} + Input array. + tol : scalar + Tolerance for the complex part of the elements in the array. + + Returns + ------- + out : ndarray + If `a` is real, the type of `a` is used for the output. If `a` + has complex elements, the returned type is float. + + See Also + -------- + real, imag, angle + + Notes + ----- + Machine epsilon varies from machine to machine and between data types + but Python floats on most platforms have a machine epsilon equal to + 2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print + out the machine epsilon for floats. - "Close enough" is defined as tol*(machine epsilon of a's element type). """ a = asanyarray(a) if not issubclass(a.dtype.type, _nx.complexfloating): @@ -192,7 +286,24 @@ _namefromtype = {'S1' : 'character', } def typename(char): - """Return an english description for the given data type character. + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + typecodes + dtype + """ return _namefromtype[char] @@ -208,11 +319,22 @@ array_precision = {_nx.single : 0, _nx.cdouble : 1, _nx.clongdouble : 2} def common_type(*arrays): - """Given a sequence of arrays as arguments, return the best inexact - scalar type which is "most" common amongst them. + """ + Return the inexact scalar type which is most common in a list of arrays. The return type will always be a inexact scalar type, even if all - the arrays are integer arrays. + the arrays are integer arrays + + Parameters + ---------- + arrays: sequence of array_like + Input sequence of arrays. + + Returns + ------- + out: data type code + Data type code. + """ is_complex = False precision = 0 diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py index a8c2c1e25..37c38e94e 100644 --- a/numpy/lib/ufunclike.py +++ b/numpy/lib/ufunclike.py @@ -24,9 +24,30 @@ def fix(x, y=None): return y def isposinf(x, y=None): - """Return a boolean array y with y[i] True for x[i] = +Inf. + """ + Return True where x is +infinity, and False otherwise. + + Parameters + ---------- + x : array_like + The input array. 
+    y : array_like
+        A boolean array with the same shape as `x` to store the result.
+
+    Returns
+    -------
+    y : ndarray
+        A boolean array where y[i] is True only if x[i] is +Inf.
+
+    See Also
+    --------
+    isneginf, isfinite
+
+    Examples
+    --------
+    >>> np.isposinf([-np.inf, 0., np.inf])
+    array([False, False,  True], dtype=bool)

-    If y is an array, the result replaces the contents of y.
     """
     if y is None:
         x = asarray(x)
@@ -35,9 +56,30 @@ def isposinf(x, y=None):
     return y

 def isneginf(x, y=None):
-    """Return a boolean array y with y[i] True for x[i] = -Inf.
+    """
+    Return True where x is -infinity, and False otherwise.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    y : array_like
+        A boolean array with the same shape as `x` to store the result.
+
+    Returns
+    -------
+    y : ndarray
+        A boolean array where y[i] is True only if x[i] is -Inf.
+
+    See Also
+    --------
+    isposinf, isfinite
+
+    Examples
+    --------
+    >>> np.isneginf([-np.inf, 0., np.inf])
+    array([ True, False, False], dtype=bool)

-    If y is an array, the result replaces the contents of y.
     """
     if y is None:
         x = asarray(x)
@@ -47,9 +89,32 @@ def isneginf(x, y=None):

 _log2 = umath.log(2)
 def log2(x, y=None):
-    """Returns the base 2 logarithm of x
+    """
+    Return the base 2 logarithm.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    y : array_like
+        Optional output array with the same shape as `x`.
+
+    Returns
+    -------
+    y : {ndarray, scalar}
+        The logarithm to the base 2 of `x`, elementwise.
+        NaNs are returned where `x` is negative.
+
+    See Also
+    --------
+    log, log1p, log10
+
+    Examples
+    --------
+    >>> np.log2([-1, 2, 4])
+    array([ NaN,   1.,   2.])

-    If y is an array, the result replaces the contents of y.
     """
     x = asanyarray(x)
     if y is None:
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 9d37dc1b5..bb162bfd3 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -24,6 +24,29 @@ def issubsctype(arg1, arg2):
     return issubclass(obj2sctype(arg1), obj2sctype(arg2))

 def issubdtype(arg1, arg2):
+    """
+    Returns True if the first argument is a typecode lower/equal in the type hierarchy.
+
+    Parameters
+    ----------
+    arg1 : dtype_like
+        dtype or string representing a typecode.
+    arg2 : dtype_like
+        dtype or string representing a typecode.
+
+    See Also
+    --------
+    numpy.core.numerictypes : Overview of numpy type hierarchy.
+
+    Examples
+    --------
+    >>> np.issubdtype('S1', str)
+    True
+    >>> np.issubdtype(np.float64, np.float32)
+    False
+
+    """
     if issubclass_(arg2, generic):
         return issubclass(_dtype(arg1).type, arg2)
     mro = _dtype(arg2).type.mro()
@@ -34,15 +57,23 @@ def issubdtype(arg1, arg2):
     return issubclass(_dtype(arg1).type, val)

 def get_include():
-    """Return the directory in the package that contains the numpy/*.h header
-    files.
+    """
+    Return the directory that contains the numpy \\*.h header files.

     Extension modules that need to compile against numpy should use this
-    function to locate the appropriate include directory. Using distutils:
+    function to locate the appropriate include directory.
+
+    Notes
+    -----
+    When using ``distutils``, for example in ``setup.py``::
+
+        import numpy as np
+        ...
+        Extension('extension_name', ...
+                include_dirs=[np.get_include()])
+        ...

-        import numpy
-        Extension('extension_name', ...
-                include_dirs=[numpy.get_include()])
     """
     import numpy
     if numpy.show_config is None:
@@ -109,6 +140,10 @@ def deprecate(func, oldname=None, newname=None):

     depdoc = '%s is DEPRECATED!!
-- use %s instead' % (oldname, newname,)

     def newfunc(*args,**kwds):
+        """
+        Use get_include, get_numpy_include is DEPRECATED.
+
+        """
         warnings.warn(str1, DeprecationWarning)
         return func(*args, **kwds)

@@ -204,7 +239,40 @@ def may_share_memory(a, b):

 def who(vardict=None):
-    """Print the Numpy arrays in the given dictionary (or globals() if None).
+    """
+    Print the Numpy arrays in the given dictionary.
+
+    If there is no dictionary passed in or `vardict` is None then the
+    Numpy arrays in the globals() dictionary are printed (all Numpy
+    arrays in the namespace).
+
+    Parameters
+    ----------
+    vardict : dict, optional
+        A dictionary possibly containing ndarrays.  Default is globals().
+
+    Returns
+    -------
+    out : None
+        Returns `None`.
+
+    Notes
+    -----
+    Prints out the name, shape, bytes and type of all of the ndarrays present
+    in `vardict`.
+
+    Examples
+    --------
+    >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', 'idx': 5}
+    >>> np.who(d)
+    Name            Shape      Bytes            Type
+    ===========================================================
+
+    y               3          24               float64
+    x               2          16               float64
+
+    Upper bound on total bytes  =       40
+
     """
     if vardict is None:
         frame = sys._getframe().f_back
@@ -464,7 +532,18 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):

 def source(object, output=sys.stdout):
-    """Write source for this object to output.
+    """
+    Print or write to a file the source code for a Numpy object.
+
+    Parameters
+    ----------
+    object : numpy object
+        Input object.
+    output : file object, optional
+        If `output` is not supplied then the source code is printed to the
+        screen (sys.stdout).  The file object must be opened in either write
+        'w' or append 'a' mode.
+
     """
     # Local import to speed up numpy's import time.
     import inspect
@@ -485,21 +564,31 @@ _function_signature_re = re.compile(r"[a-z_]+\(.*[,=].*\)", re.I)

 def lookfor(what, module=None, import_modules=True, regenerate=False):
     """
-    Search for objects whose documentation contains all given words.
-    Shows a summary of matching objects, sorted roughly by relevance.
+    Do a keyword search on docstrings.
+
+    A list of objects that match the search is displayed,
+    sorted by relevance.

     Parameters
     ----------
     what : str
         String containing words to look for.
-
     module : str, module
         Module whose docstrings to go through.
     import_modules : bool
         Whether to import sub-modules in packages.
-        Will import only modules in __all__
-    regenerate: bool
-        Re-generate the docstring cache
+        Will import only modules in ``__all__``.
+    regenerate : bool
+        Whether to re-generate the docstring cache.
+
+    Examples
+    --------
+    >>> np.lookfor('binary representation')
+    Search results for 'binary representation'
+    ------------------------------------------
+    numpy.binary_repr
+        Return the binary representation of the input number as a string.
+
     """
     import pydoc
@@ -718,7 +807,10 @@ class SafeEval(object):
             raise SyntaxError("Unknown name: %s" % node.name)

 def safe_eval(source):
-    """ Evaluate a string containing a Python literal expression without
+    """
+    Protected string evaluation.
+
+    Evaluate a string containing a Python literal expression without
     allowing the execution of arbitrary non-literal code.

     Parameters
@@ -731,8 +823,9 @@ def safe_eval(source):

     Raises
     ------
-    SyntaxError if the code is invalid Python expression syntax or if it
-    contains non-literal code.
+    SyntaxError
+        If the code has invalid Python syntax, or if it contains non-literal
+        code.

     Examples
     --------
@@ -755,6 +848,7 @@ def safe_eval(source):
     Traceback (most recent call last):
     ...
SyntaxError: Unknown name: dict + """ # Local import to speed up numpy's import time. import compiler -- cgit v1.2.1
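
As a quick sanity check of the behaviour documented in the docstrings above, the short standalone snippet below exercises nan_to_num, isreal, isposinf/isneginf, real_if_close and histogram2d. It is an illustrative sketch only: exact printed values (for instance the largest finite float substituted by nan_to_num) depend on the platform and NumPy version, so the comments describe the expected form of the output rather than exact numbers.

    import numpy as np

    # nan_to_num: NaN -> 0, +inf/-inf -> largest/smallest representable float
    x = np.array([np.inf, -np.inf, np.nan, -128.0, 128.0])
    print(np.nan_to_num(x))

    # isreal / isposinf / isneginf return boolean arrays of the same shape
    print(np.isreal([1 + 1j, 1 + 0j, 4.5, 3, 2, 2j]))  # [False True True True True False]
    print(np.isposinf([-np.inf, 0.0, np.inf]))         # [False False True]
    print(np.isneginf([-np.inf, 0.0, np.inf]))         # [True False False]

    # real_if_close drops imaginary parts smaller than tol * machine epsilon
    # (default tol=100, i.e. roughly 2.2e-14 for double precision)
    print(np.real_if_close(np.array([2.1 + 1e-15j, 5.2 + 0j])))  # [2.1 5.2]

    # histogram2d: x is binned along the first axis, y along the second, so H
    # has shape (nx, ny) and each edge array has one more element than bins
    x, y = np.random.randn(2, 100)
    H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
    print(H.shape, xedges.shape, yedges.shape)  # (5, 8) (6,) (9,)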