author     Pauli Virtanen <pav@iki.fi>   2009-10-02 19:33:33 +0000
committer  Pauli Virtanen <pav@iki.fi>   2009-10-02 19:33:33 +0000
commit     474d013a3b38c5909a7381cfa0cc2c8203807cfa (patch)
tree       af895af917b636c1a0ddcf94a7134052a6d6e55e
parent     f1e3392d6d8813ed146ce1675f65a880634f727b (diff)
download   numpy-474d013a3b38c5909a7381cfa0cc2c8203807cfa.tar.gz
Docstring update: lib
-rw-r--r--   numpy/lib/_datasource.py     288
-rw-r--r--   numpy/lib/_iotools.py        274
-rw-r--r--   numpy/lib/arraysetops.py     136
-rw-r--r--   numpy/lib/arrayterator.py     81
-rw-r--r--   numpy/lib/financial.py        19
-rw-r--r--   numpy/lib/format.py          149
-rw-r--r--   numpy/lib/function_base.py   226
-rw-r--r--   numpy/lib/index_tricks.py    176
-rw-r--r--   numpy/lib/io.py              339
-rw-r--r--   numpy/lib/polynomial.py      313
-rw-r--r--   numpy/lib/scimath.py         199
-rw-r--r--   numpy/lib/stride_tricks.py     7
-rw-r--r--   numpy/lib/twodim_base.py     198
-rw-r--r--   numpy/lib/type_check.py       88
-rw-r--r--   numpy/lib/utils.py           100
15 files changed, 1846 insertions, 747 deletions
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 0fe594ac3..4bfbf0ac4 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -15,7 +15,7 @@ DataSource files can originate locally or remotely:
DataSource files can also be compressed or uncompressed. Currently only gzip
and bz2 are supported.
-Example:
+Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
@@ -43,6 +43,28 @@ from shutil import rmtree
# TODO: .zip support, .tar support?
class _FileOpeners(object):
+ """
+ Container for different methods to open (un-)compressed files.
+
+ `_FileOpeners` contains a dictionary that holds one method for each
+ supported file format. Attribute lookup is implemented in such a way that
+ an instance of `_FileOpeners` itself can be indexed with the keys of that
+ dictionary. Currently uncompressed files as well as files
+ compressed with ``gzip`` or ``bz2`` compression are supported.
+
+ Notes
+ -----
+ `_file_openers`, an instance of `_FileOpeners`, is made available for
+ use in the `_datasource` module.
+
+ Examples
+ --------
+ >>> np.lib._datasource._file_openers.keys()
+ [None, '.bz2', '.gz']
+ >>> np.lib._datasource._file_openers['.gz'] is gzip.open
+ True
+
+ """
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
@@ -62,6 +84,21 @@ class _FileOpeners(object):
self._loaded = True
def keys(self):
+ """
+ Return the keys of currently supported file openers.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ keys : list
+ The keys are None for uncompressed files and the file extension
+ strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
+ methods.
+
+ """
self._load()
return self._file_openers.keys()
def __getitem__(self, key):
@@ -71,23 +108,34 @@ class _FileOpeners(object):
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
- """Open ``path`` with ``mode`` and return the file object.
-
- If ``path`` is an URL, it will be downloaded, stored in the DataSource
- directory and opened from there.
-
- *Parameters*:
-
- path : {string}
-
- mode : {string}, optional
-
- destpath : {string}, optional
- Destination directory where URLs will be downloaded and stored.
-
- *Returns*:
-
- file object
+ """
+ Open `path` with `mode` and return the file object.
+
+ If ``path`` is an URL, it will be downloaded, stored in the `DataSource`
+ `destpath` directory and opened from there.
+
+ Parameters
+ ----------
+ path : str
+ Local file path or URL to open.
+ mode : str, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
+ append. Available modes depend on the type of object specified by path.
+ Default is 'r'.
+ destpath : str, optional
+ Path to the directory where the source file gets downloaded to for use.
+ If `destpath` is None, a temporary directory will be created. The
+ default path is the current directory.
+
+ Returns
+ -------
+ out : file object
+ The opened file.
+
+ Notes
+ -----
+ This is a convenience function that instantiates a `DataSource` and
+ returns the file object from ``DataSource.open(path)``.
"""
@@ -96,49 +144,52 @@ def open(path, mode='r', destpath=os.curdir):
class DataSource (object):
- """A generic data source file (file, http, ftp, ...).
+ """
+ DataSource(destpath='.')
+
+ A generic data source file (file, http, ftp, ...).
- DataSources could be local files or remote files/URLs. The files may
- also be compressed or uncompressed. DataSource hides some of the low-level
+ DataSources can be local files or remote files/URLs. The files may
+ also be compressed or uncompressed. DataSource hides some of the low-level
details of downloading the file, allowing you to simply pass in a valid
file path (or URL) and obtain a file object.
- *Methods*:
-
- - exists : test if the file exists locally or remotely
- - abspath : get absolute path of the file in the DataSource directory
- - open : open the file
-
- *Example URL DataSource*::
+ Parameters
+ ----------
+ destpath : str or None, optional
+ Path to the directory where the source file gets downloaded to for use.
+ If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
- # Initialize DataSource with a local directory, default is os.curdir.
- ds = DataSource('/home/guido')
+ Notes
+ -----
+ URLs require a scheme string (``http://``) to be used, without it they
+ will fail::
- # Open remote file.
- # File will be downloaded and opened from here:
- # /home/guido/site/xyz.txt
- ds.open('http://fake.xyz.web/site/xyz.txt')
+ >>> repos = DataSource()
+ >>> repos.exists('www.google.com/index.html')
+ False
+ >>> repos.exists('http://www.google.com/index.html')
+ True
- *Example using DataSource for temporary files*::
+ Temporary directories are deleted when the DataSource is deleted.
- # Initialize DataSource with 'None' for the local directory.
- ds = DataSource(None)
+ Examples
+ --------
- # Open local file.
- # Opened file exists in a temporary directory like:
- # /tmp/tmpUnhcvM/foobar.txt
- # Temporary directories are deleted when the DataSource is deleted.
- ds.open('/home/guido/foobar.txt')
+ ::
- *Notes*:
- BUG : URLs require a scheme string ('http://') to be used.
- www.google.com will fail.
+ >>> ds = DataSource('/home/guido')
+ >>> urlname = 'http://www.google.com/index.html'
+ >>> gfile = ds.open('http://www.google.com/index.html') # open remote file
+ >>> ds.abspath(urlname)
+ '/home/guido/www.google.com/index.html'
- >>> repos.exists('www.google.com/index.html')
- False
-
- >>> repos.exists('http://www.google.com/index.html')
- True
+ >>> ds = DataSource(None) # use with temporary file
+ >>> ds.open('/home/guido/foobar.txt')
+ <open file '/home/guido/foobar.txt', mode 'r' at 0x91d4430>
+ >>> ds.abspath('/home/guido/foobar.txt')
+ '/tmp/tmpy4pgsP/home/guido/foobar.txt'
"""
@@ -278,25 +329,23 @@ class DataSource (object):
"""
Return absolute path of file in the DataSource directory.
- If `path` is an URL, the ``abspath`` will be either the location
+ If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
- using the ``open`` method.
-
- The functionality is idential to os.path.abspath.
+ using the `open` method.
Parameters
----------
- path : string
+ path : str
Can be a local file or a remote URL.
Returns
-------
- out : string
- Complete path, rooted in the DataSource destination directory.
+ out : str
+ Complete path, including the `DataSource` destination directory.
- See Also
- --------
- open
+ Notes
+ -----
+ The functionality is based on `os.path.abspath`.
"""
# We do this here to reduce the 'import numpy' initial import time.
@@ -340,14 +389,13 @@ class DataSource (object):
Test if `path` exists as (and in this order):
- a local file.
- - a remote URL that have been downloaded and stored locally in the
- DataSource directory.
- - a remote URL that has not been downloaded, but is valid and
- accessible.
+ - a remote URL that has been downloaded and stored locally in the
+ `DataSource` directory.
+ - a remote URL that has not been downloaded, but is valid and accessible.
Parameters
----------
- path : string
+ path : str
Can be a local file or a remote URL.
Returns
@@ -355,16 +403,12 @@ class DataSource (object):
out : bool
True if `path` exists.
- See Also
- --------
- abspath
-
Notes
-----
- When `path` is an URL, ``exist`` will return True if it's either stored
- locally in the DataSource directory, or is a valid remote URL. DataSource
- does not discriminate between to two, the file is accessible if it exists
- in either location.
+ When `path` is an URL, `exists` will return True if it's either stored
+ locally in the `DataSource` directory, or is a valid remote URL.
+ `DataSource` does not discriminate between the two, the file is accessible
+ if it exists in either location.
"""
# We import this here because importing urllib2 is slow and
@@ -394,17 +438,17 @@ class DataSource (object):
"""
Open and return file-like object.
- If ``path`` is an URL, it will be downloaded, stored in the DataSource
+ If `path` is an URL, it will be downloaded, stored in the `DataSource`
directory and opened from there.
Parameters
----------
- path : string
- Local file path or URL to open
+ path : str
+ Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
- `path`.
+ `path`. Default is 'r'.
Returns
-------
@@ -435,30 +479,39 @@ class DataSource (object):
class Repository (DataSource):
"""
- A data Repository where multiple DataSource's share a base URL/directory.
+ Repository(baseurl, destpath='.')
- Repository extends DataSource by prepending a base URL (or directory) to
- all the files it handles. Use a Repository when you will be working with
- multiple files from one base URL. Initialize the Respository with the
+ A data repository where multiple DataSource's share a base URL/directory.
+
+ `Repository` extends `DataSource` by prepending a base URL (or directory)
+ to all the files it handles. Use `Repository` when you will be working
+ with multiple files from one base URL. Initialize `Repository` with the
base URL, then refer to each file by its filename only.
- *Methods*:
+ Parameters
+ ----------
+ baseurl : str
+ Path to the local directory or remote location that contains the
+ data files.
+ destpath : str or None, optional
+ Path to the directory where the source file gets downloaded to for use.
+ If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
- - exists : test if the file exists locally or remotely
- - abspath : get absolute path of the file in the DataSource directory
- - open : open the file
+ Examples
+ --------
+ To analyze all files in the repository, do something like this
+ (note: this is not self-contained code)::
- *Toy example*::
+ >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+ >>> for filename in filelist:
+ ... fp = repos.open(filename)
+ ... fp.analyze()
+ ... fp.close()
- # Analyze all files in the repository.
- repos = Repository('/home/user/data/dir/')
- for filename in filelist:
- fp = repos.open(filename)
- fp.analyze()
- fp.close()
+ Similarly you could use a URL for a repository::
- # Similarly you could use a URL for a repository.
- repos = Repository('http://www.xyz.edu/data')
+ >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
"""
@@ -487,25 +540,20 @@ class Repository (DataSource):
"""
Return absolute path of file in the Repository directory.
- If `path` is an URL, the ``abspath`` will be either the location
+ If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
- using the ``open`` method.
-
- The functionality is idential to os.path.abspath.
+ using the `open` method.
Parameters
----------
- path : string
- Can be a local file or a remote URL.
+ path : str
+ Can be a local file or a remote URL. This may, but does not have
+ to, include the `baseurl` with which the `Repository` was initialized.
Returns
-------
- out : string
- Complete path, rooted in the DataSource destination directory.
-
- See Also
- --------
- open
+ out : str
+ Complete path, including the `DataSource` destination directory.
"""
return DataSource.abspath(self, self._fullpath(path))
@@ -517,31 +565,28 @@ class Repository (DataSource):
Test if `path` exists as (and in this order):
- a local file.
- - a remote URL that have been downloaded and stored locally in the
- DataSource directory.
+ - a remote URL that has been downloaded and stored locally in the
+ `DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
- path : string
- Can be a local file or a remote URL.
+ path : str
+ Can be a local file or a remote URL. This may, but does not have
+ to, include the `baseurl` with which the `Repository` was initialized.
Returns
-------
out : bool
True if `path` exists.
- See Also
- --------
- abspath
-
Notes
-----
- When `path` is an URL, ``exist`` will return True if it's either stored
- locally in the DataSource directory, or is a valid remote URL. DataSource
- does not discriminate between to two, the file is accessible if it exists
- in either location.
+ When `path` is an URL, `exists` will return True if it's either stored
+ locally in the `DataSource` directory, or is a valid remote URL.
+ `DataSource` does not discriminate between the two, the file is accessible
+ if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
@@ -555,12 +600,13 @@ class Repository (DataSource):
Parameters
----------
- path : string
- Local file path or URL to open
+ path : str
+ Local file path or URL to open. This may, but does not have to,
+ include the `baseurl` with which the `Repository` was initialized.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
- `path`.
+ `path`. Default is 'r'.
Returns
-------
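The updated `open` docstring above notes that it is a convenience wrapper which instantiates a `DataSource` and returns the file object from ``DataSource.open(path)``. A minimal sketch of that usage, assuming the private module path ``numpy.lib._datasource`` and a placeholder file name::

    import os
    import numpy.lib._datasource as datasource

    # Create a small local file so the sketch runs end to end
    # ('example.txt' is a placeholder, not a file from the docs).
    with open('example.txt', 'w') as f:
        f.write('1 2 3\n')

    # A DataSource rooted in the current directory, the documented default.
    ds = datasource.DataSource(os.curdir)
    fh = ds.open('example.txt', mode='r')   # same object the module-level open() returns
    print(fh.read())
    fh.close()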
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 250a14795..d2228bc4a 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -1,7 +1,4 @@
-"""
-A collection of functions designed to help I/O with ascii file.
-
-"""
+"""A collection of functions designed to help I/O with ascii files."""
__docformat__ = "restructuredtext en"
import numpy as np
@@ -56,7 +53,23 @@ def _to_filehandle(fname, flag='r', return_opened=False):
def has_nested_fields(ndtype):
"""
- Returns whether one or several fields of a structured array are nested.
+ Returns whether one or several fields of a dtype are nested.
+
+ Parameters
+ ----------
+ ndtype : dtype
+ Data-type of a structured array.
+
+ Raises
+ ------
+ AttributeError : If `ndtype` does not have a `names` attribute.
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
+ >>> np.lib._iotools.has_nested_fields(dt)
+ False
+
"""
for name in ndtype.names or ():
if ndtype[name].names:
@@ -66,8 +79,8 @@ def has_nested_fields(ndtype):
def flatten_dtype(ndtype, flatten_base=False):
"""
- Unpack a structured data-type by collapsing nested fields and/or fields with
- a shape.
+ Unpack a structured data-type by collapsing nested fields and/or fields
+ with a shape.
Note that the field names are lost.
@@ -81,13 +94,14 @@ def flatten_dtype(ndtype, flatten_base=False):
Examples
--------
>>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
- ... ('block', int, (2, 3))])
- >>> flatten_dtype(dt)
- [dtype('|S4'), dtype('float64'), dtype('float64'), dtype(('int32',(2, 3)))]
- >>> flatten_dtype(dt, flatten_base=True)
+ ... ('block', int, (2, 3))])
+ >>> np.lib._iotools.flatten_dtype(dt)
+ [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]
+ >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
[dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),
dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),
dtype('int32')]
+
"""
names = ndtype.names
if names is None:
@@ -106,21 +120,38 @@ def flatten_dtype(ndtype, flatten_base=False):
class LineSplitter:
"""
- Defines a function to split a string at a given delimiter or at given places.
-
+ Object to split a string at a given delimiter or at given places.
+
Parameters
----------
- comment : {'#', string}
- Character used to mark the beginning of a comment.
- delimiter : var, optional
+ delimiter : str, int, or sequence of ints, optional
If a string, character used to delimit consecutive fields.
If an integer or a sequence of integers, width(s) of each field.
- autostrip : boolean, optional
- Whether to strip each individual fields
+ comment : str, optional
+ Character used to mark the beginning of a comment. Default is '#'.
+ autostrip : bool, optional
+ Whether to strip each individual field. Default is True.
+
"""
def autostrip(self, method):
- "Wrapper to strip each member of the output of `method`."
+ """
+ Wrapper to strip each member of the output of `method`.
+
+ Parameters
+ ----------
+ method : function
+ Function that takes a single argument and returns a sequence of
+ strings.
+
+ Returns
+ -------
+ wrapped : function
+ The result of wrapping `method`. `wrapped` takes a single input
+ argument and returns a list of strings that are stripped of
+ white-space.
+
+ """
return lambda input: [_.strip() for _ in method(input)]
#
def __init__(self, delimiter=None, comments='#', autostrip=True):
@@ -173,33 +204,52 @@ class LineSplitter:
class NameValidator:
"""
- Validates a list of strings to use as field names.
- The strings are stripped of any non alphanumeric character, and spaces
- are replaced by `_`. If the optional input parameter `case_sensitive`
- is False, the strings are set to upper case.
+ Object to validate a list of strings to use as field names.
- During instantiation, the user can define a list of names to exclude, as
- well as a list of invalid characters. Names in the exclusion list
- are appended a '_' character.
+ The strings are stripped of any non alphanumeric character, and spaces
+ are replaced by '_'. During instantiation, the user can define a list of
+ names to exclude, as well as a list of invalid characters. Names in the
+ exclusion list are appended a '_' character.
- Once an instance has been created, it can be called with a list of names
+ Once an instance has been created, it can be called with a list of names,
and a list of valid names will be created.
- The `__call__` method accepts an optional keyword, `default`, that sets
- the default name in case of ambiguity. By default, `default = 'f'`, so
- that names will default to `f0`, `f1`
+ The `__call__` method accepts an optional keyword "default" that sets
+ the default name in case of ambiguity. By default this is 'f', so
+ that names will default to `f0`, `f1`, etc.
Parameters
----------
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
- ['return','file','print']. Excluded names are appended an underscore:
- for example, `file` would become `file_`.
- deletechars : string, optional
- A string combining invalid characters that must be deleted from the names.
+ ['return', 'file', 'print']. Excluded names are appended an underscore:
+ for example, `file` becomes `file_` if supplied.
+ deletechars : str, optional
+ A string combining invalid characters that must be deleted from the
+ names.
casesensitive : {True, False, 'upper', 'lower'}, optional
- If True, field names are case_sensitive.
- If False or 'upper', field names are converted to upper case.
- If 'lower', field names are converted to lower case.
+ * If True, field names are case-sensitive.
+ * If False or 'upper', field names are converted to upper case.
+ * If 'lower', field names are converted to lower case.
+
+ The default value is True.
+
+ Notes
+ -----
+ Calling an instance of `NameValidator` is the same as calling its method
+ `validate`.
+
+ Examples
+ --------
+ >>> validator = np.lib._iotools.NameValidator()
+ >>> validator(['file', 'field2', 'with space', 'CaSe'])
+ ['file_', 'field2', 'with_space', 'CaSe']
+
+ >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
+ ...                                           deletechars='q',
+ ...                                           case_sensitive='False')
+ >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
+ ['excl_', 'field2', 'no_', 'with_space', 'case']
+
"""
#
defaultexcludelist = ['return','file','print']
@@ -229,6 +279,28 @@ class NameValidator:
self.case_converter = lambda x: x
#
def validate(self, names, default='f'):
+ """
+ Validate a list of strings to use as field names for a structured array.
+
+ Parameters
+ ----------
+ names : list of str
+ The strings that are to be validated.
+ default : str, optional
+ The default field name, used if validating a given string reduces its
+ length to zero.
+
+ Returns
+ -------
+ validatednames : list of str
+ The list of validated field names.
+
+ Notes
+ -----
+ A `NameValidator` instance can be called directly, which is the same as
+ calling `validate`. For examples, see `NameValidator`.
+
+ """
#
if names is None:
return
@@ -265,11 +337,29 @@ class NameValidator:
def str2bool(value):
"""
Tries to transform a string supposed to represent a boolean to a boolean.
-
+
+ Parameters
+ ----------
+ value : str
+ The string that is transformed to a boolean.
+
+ Returns
+ -------
+ boolval : bool
+ The boolean representation of `value`.
+
Raises
------
ValueError
If the string is not 'True' or 'False' (case independent)
+
+ Examples
+ --------
+ >>> np.lib._iotools.str2bool('TRUE')
+ True
+ >>> np.lib._iotools.str2bool('false')
+ False
+
"""
value = value.upper()
if value == 'TRUE':
@@ -286,43 +376,45 @@ class StringConverter:
Factory class for function transforming a string into another object (int,
float).
- After initialization, an instance can be called to transform a string
+ After initialization, an instance can be called to transform a string
into another object. If the string is recognized as representing a missing
value, a default value is returned.
- Parameters
- ----------
- dtype_or_func : {None, dtype, function}, optional
- Input data type, used to define a basic function and a default value
- for missing data. For example, when `dtype` is float, the :attr:`func`
- attribute is set to ``float`` and the default value to `np.nan`.
- Alternatively, function used to convert a string to another object.
- In that later case, it is recommended to give an associated default
- value as input.
- default : {None, var}, optional
- Value to return by default, that is, when the string to be converted
- is flagged as missing.
- missing_values : {sequence}, optional
- Sequence of strings indicating a missing value.
- locked : {boolean}, optional
- Whether the StringConverter should be locked to prevent automatic
- upgrade or not.
-
Attributes
----------
func : function
- Function used for the conversion
- default : var
+ Function used for the conversion.
+ default : any
Default value to return when the input corresponds to a missing value.
type : type
Type of the output.
- _status : integer
+ _status : int
Integer representing the order of the conversion.
_mapper : sequence of tuples
- Sequence of tuples (dtype, function, default value) to evaluate in order.
- _locked : boolean
- Whether the StringConverter is locked, thereby preventing automatic any
- upgrade or not.
+ Sequence of tuples (dtype, function, default value) to evaluate in
+ order.
+ _locked : bool
+ Holds `locked` parameter.
+
+ Parameters
+ ----------
+ dtype_or_func : {None, dtype, function}, optional
+ If a `dtype`, specifies the input data type, used to define a basic
+ function and a default value for missing data. For example, when
+ `dtype` is float, the `func` attribute is set to `float` and the
+ default value to `np.nan`.
+ If a function, this function is used to convert a string to another
+ object. In this case, it is recommended to give an associated default
+ value as input.
+ default : any, optional
+ Value to return by default, that is, when the string to be converted
+ is flagged as missing. If not given, `StringConverter` tries to supply
+ a reasonable default value.
+ missing_values : sequence of str, optional
+ Sequence of strings indicating a missing value.
+ locked : bool, optional
+ Whether the StringConverter should be locked to prevent automatic
+ upgrade or not. Default is False.
"""
#
@@ -457,10 +549,24 @@ class StringConverter:
#
def upgrade(self, value):
"""
- Tries to find the best converter for `value`, by testing different
- converters in order.
- The order in which the converters are tested is read from the
- :attr:`_status` attribute of the instance.
+ Try to find the best converter for a given string, and return the result.
+
+ The supplied string `value` is converted by testing different
+ converters in order. First the `func` method of the `StringConverter`
+ instance is tried, if this fails other available converters are tried.
+ The order in which these other converters are tried is determined by the
+ `_status` attribute of the instance.
+
+ Parameters
+ ----------
+ value : str
+ The string to convert.
+
+ Returns
+ -------
+ out : any
+ The result of converting `value` with the appropriate converter.
+
"""
self._checked = True
try:
@@ -480,18 +586,28 @@ class StringConverter:
#
def update(self, func, default=None, missing_values='', locked=False):
"""
- Sets the :attr:`func` and :attr:`default` attributes directly.
+ Set StringConverter attributes directly.
+
+ Parameters
+ ----------
+ func : function
+ Conversion function.
+ default : any, optional
+ Value to return by default, that is, when the string to be converted
+ is flagged as missing. If not given, `StringConverter` tries to supply
+ a reasonable default value.
+ missing_values : sequence of str, optional
+ Sequence of strings indicating a missing value.
+ locked : bool, optional
+ Whether the StringConverter should be locked to prevent automatic
+ upgrade or not. Default is False.
+
+ Notes
+ -----
+ `update` takes the same parameters as the constructor of `StringConverter`,
+ except that `func` does not accept a `dtype` whereas `dtype_or_func` in
+ the constructor does.
- Parameters
- ----------
- func : function
- Conversion function.
- default : {var}, optional
- Default value to return when a missing value is encountered.
- missing_values : {var}, optional
- Sequence of strings representing missing values.
- locked : {False, True}, optional
- Whether the status should be locked to prevent automatic upgrade.
"""
self.func = func
self._locked = locked
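The `LineSplitter` and `NameValidator` docstrings above describe how a raw text line is split into fields and how those fields are turned into valid structured-array names. A short sketch combining the two, based only on the signatures and examples shown in this diff (both classes live in the private ``numpy.lib._iotools`` module)::

    from numpy.lib._iotools import LineSplitter, NameValidator

    # Split a comma-delimited header line; autostrip=True (the default)
    # removes the surrounding whitespace from each field.
    splitter = LineSplitter(delimiter=',')
    fields = splitter("name, x value, return\n")
    print(fields)              # ['name', 'x value', 'return']

    # Sanitize the fields: spaces become '_', and names in the default
    # exclusion list ('return', 'file', 'print') get a trailing '_'.
    validator = NameValidator()
    print(validator(fields))   # ['name', 'x_value', 'return_']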
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 89f82a942..6868dc1d4 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -96,32 +96,37 @@ def unique(ar, return_index=False, return_inverse=False):
"""
Find the unique elements of an array.
+ Returns the sorted unique elements of an array. There are two optional
+ outputs in addition to the unique elements: the indices of the input array
+ that give the unique values, and the indices of the unique array that
+ reconstruct the input array.
+
Parameters
----------
ar : array_like
- This array will be flattened if it is not already 1-D.
+ Input array. This will be flattened if it is not already 1-D.
return_index : bool, optional
- If True, also return the indices against `ar1` that result in the
- unique array.
+ If True, also return the indices of `ar` that result in the unique
+ array.
return_inverse : bool, optional
- If True, also return the indices against the unique array that
- result in `ar`.
+ If True, also return the indices of the unique array that can be used
+ to reconstruct `ar`.
Returns
-------
unique : ndarray
- The unique values.
+ The sorted unique values.
unique_indices : ndarray, optional
- The indices of the unique values. Only provided if `return_index` is
- True.
+ The indices of the unique values in the (flattened) original array.
+ Only provided if `return_index` is True.
unique_inverse : ndarray, optional
- The indices to reconstruct the original array. Only provided if
- `return_inverse` is True.
+ The indices to reconstruct the (flattened) original array from the
+ unique array. Only provided if `return_inverse` is True.
See Also
--------
- numpy.lib.arraysetops : Module with a number of other functions
- for performing set operations on arrays.
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
Examples
--------
@@ -131,17 +136,29 @@ def unique(ar, return_index=False, return_inverse=False):
>>> np.unique(a)
array([1, 2, 3])
- Reconstruct the input from unique values:
+ Return the indices of the original array that give the unique values:
- >>> np.unique([1,2,6,4,2,3,2], return_index=True)
- >>> x = [1,2,6,4,2,3,2]
- >>> u, i = np.unique(x, return_inverse=True)
+ >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+ >>> u, indices = np.unique(a, return_index=True)
+ >>> u
+ array(['a', 'b', 'c'],
+ dtype='|S1')
+ >>> indices
+ array([0, 1, 3])
+ >>> a[indices]
+ array(['a', 'b', 'c'],
+ dtype='|S1')
+
+ Reconstruct the input array from the unique values:
+
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+ >>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
- >>> i
+ >>> indices
array([0, 1, 4, 3, 1, 2, 1])
- >>> [u[p] for p in i]
- [1, 2, 6, 4, 2, 3, 2]
+ >>> u[indices]
+ array([1, 2, 6, 4, 2, 3, 2])
"""
try:
@@ -183,7 +200,9 @@ def unique(ar, return_index=False, return_inverse=False):
def intersect1d(ar1, ar2, assume_unique=False):
"""
- Intersection returning unique elements common to both arrays.
+ Find the intersection of two arrays.
+
+ Return the sorted, unique values that are in both of the input arrays.
Parameters
----------
@@ -195,7 +214,7 @@ def intersect1d(ar1, ar2, assume_unique=False):
Returns
-------
- out : ndarray, shape(N,)
+ out : ndarray
Sorted 1D array of common and unique elements.
See Also
@@ -205,7 +224,7 @@ def intersect1d(ar1, ar2, assume_unique=False):
Examples
--------
- >>> np.intersect1d([1,3,3], [3,1,1])
+ >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
array([1, 3])
"""
@@ -219,7 +238,10 @@ def intersect1d(ar1, ar2, assume_unique=False):
def setxor1d(ar1, ar2, assume_unique=False):
"""
- Set exclusive-or of two 1D arrays.
+ Find the set exclusive-or of two arrays.
+
+ Return the sorted, unique values that are in only one (not both) of the
+ input arrays.
Parameters
----------
@@ -232,12 +254,15 @@ def setxor1d(ar1, ar2, assume_unique=False):
Returns
-------
xor : ndarray
- The values that are only in one, but not both, of the input arrays.
+ Sorted 1D array of unique values that are in only one of the input
+ arrays.
- See Also
+ Examples
--------
- numpy.lib.arraysetops : Module with a number of other functions for
- performing set operations on arrays.
+ >>> a = np.array([1, 2, 3, 2, 4])
+ >>> b = np.array([2, 3, 5, 7, 5])
+ >>> np.setxor1d(a,b)
+ array([1, 4, 5, 7])
"""
if not assume_unique:
@@ -257,22 +282,24 @@ def setxor1d(ar1, ar2, assume_unique=False):
def in1d(ar1, ar2, assume_unique=False):
"""
- Test whether each element of an array is also present in a second array.
+ Test whether each element of a 1D array is also present in a second array.
Returns a boolean array the same length as `ar1` that is True
where an element of `ar1` is in `ar2` and False otherwise.
Parameters
----------
- ar1, ar2 : array_like
- Input arrays.
- assume_unique : bool
+ ar1 : array_like, shape (M,)
+ Input array.
+ ar2 : array_like
+ The values against which to test each value of `ar1`.
+ assume_unique : bool, optional
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
- mask : ndarray, bool
+ mask : ndarray of bools, shape(M,)
The values `ar1[mask]` are in `ar2`.
See Also
@@ -282,17 +309,21 @@ def in1d(ar1, ar2, assume_unique=False):
Notes
-----
+ `in1d` can be considered as an element-wise function version of the
+ python keyword `in`, for 1D sequences. ``in1d(a, b)`` is roughly
+ equivalent to ``np.array([item in b for item in a])``.
+
.. versionadded:: 1.4.0
Examples
--------
- >>> test = np.arange(5)
+ >>> test = np.array([0, 1, 2, 5, 0])
>>> states = [0, 2]
- >>> mask = np.setmember1d(test, states)
+ >>> mask = np.in1d(test, states)
>>> mask
- array([ True, False, True, False, False], dtype=bool)
+ array([ True, False, True, False, True], dtype=bool)
>>> test[mask]
- array([0, 2])
+ array([0, 2, 0])
"""
if not assume_unique:
@@ -316,31 +347,39 @@ def in1d(ar1, ar2, assume_unique=False):
def union1d(ar1, ar2):
"""
- Union of two 1D arrays.
+ Find the union of two arrays.
+
+ Return the unique, sorted array of values that are in either of the two
+ input arrays.
Parameters
----------
- ar1 : array_like, shape(M,)
- Input array.
- ar2 : array_like, shape(N,)
- Input array.
+ ar1, ar2 : array_like
+ Input arrays. They are flattened if they are not already 1D.
Returns
-------
union : ndarray
- Unique union of input arrays.
+ Unique, sorted union of the input arrays.
- See also
+ See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
+ Examples
+ --------
+ >>> np.union1d([-1, 0, 1], [-2, 0, 2])
+ array([-2, -1, 0, 1, 2])
+
"""
return unique( np.concatenate( (ar1, ar2) ) )
def setdiff1d(ar1, ar2, assume_unique=False):
"""
- Set difference of two 1D arrays.
+ Find the set difference of two arrays.
+
+ Return the sorted, unique values in `ar1` that are not in `ar2`.
Parameters
----------
@@ -355,13 +394,20 @@ def setdiff1d(ar1, ar2, assume_unique=False):
Returns
-------
difference : ndarray
- The values in ar1 that are not in ar2.
+ Sorted 1D array of values in `ar1` that are not in `ar2`.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3, 2, 4, 1])
+ >>> b = np.array([3, 4, 5, 6])
+ >>> np.setdiff1d(a, b)
+ array([1, 2])
+
"""
if not assume_unique:
ar1 = unique(ar1)
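The new `in1d` notes state that ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. That equivalence can be checked directly with the values used in the docstring example::

    import numpy as np

    test = np.array([0, 1, 2, 5, 0])
    states = [0, 2]

    mask_fast = np.in1d(test, states)                        # vectorized membership test
    mask_slow = np.array([item in states for item in test])  # the documented equivalent loop

    assert (mask_fast == mask_slow).all()
    print(test[mask_fast])    # [0 2 0]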
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index 581e0b31e..d4a91b001 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -2,20 +2,9 @@
A buffered iterator for big arrays.
This module solves the problem of iterating over a big file-based array
-without having to read it into memory. The ``Arrayterator`` class wraps
-an array object, and when iterated it will return subarrays with at most
-``buf_size`` elements.
-
-The algorithm works by first finding a "running dimension", along which
-the blocks will be extracted. Given an array of dimensions (d1, d2, ...,
-dn), eg, if ``buf_size`` is smaller than ``d1`` the first dimension will
-be used. If, on the other hand,
-
- d1 < buf_size < d1*d2
-
-the second dimension will be used, and so on. Blocks are extracted along
-this dimension, and when the last block is returned the process continues
-from the next dimension, until all elements have been read.
+without having to read it into memory. The `Arrayterator` class wraps
+an array object, and when iterated it will return sub-arrays with at most
+a user-specified number of elements.
"""
@@ -29,13 +18,69 @@ class Arrayterator(object):
"""
Buffered iterator for big arrays.
- This class creates a buffered iterator for reading big arrays in small
+ `Arrayterator` creates a buffered iterator for reading big arrays in small
contiguous blocks. The class is useful for objects stored in the
- filesystem. It allows iteration over the object *without* reading
+ file system. It allows iteration over the object *without* reading
everything in memory; instead, small blocks are read and iterated over.
- The class can be used with any object that supports multidimensional
- slices, like variables from Scientific.IO.NetCDF, pynetcdf and ndarrays.
+ `Arrayterator` can be used with any object that supports multidimensional
+ slices. This includes NumPy arrays, but also variables from
+ Scientific.IO.NetCDF or pynetcdf for example.
+
+ Parameters
+ ----------
+ var : array_like
+ The object to iterate over.
+ buf_size : int, optional
+ The buffer size. If `buf_size` is supplied, the maximum amount of
+ data that will be read into memory is `buf_size` elements.
+ Default is None, which will read as many elements as possible
+ into memory.
+
+ Attributes
+ ----------
+ var
+ buf_size
+ start
+ stop
+ step
+ shape
+ flat
+
+ See Also
+ --------
+ ndenumerate : Multidimensional array iterator.
+ flatiter : Flat array iterator.
+ memmap : Create a memory-map to an array stored in a binary file on disk.
+
+ Notes
+ -----
+ The algorithm works by first finding a "running dimension", along which
+ the blocks will be extracted. Given an array of dimensions
+ ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+ first dimension will be used. If, on the other hand,
+ ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+ Blocks are extracted along this dimension, and when the last block is
+ returned the process continues from the next dimension, until all
+ elements have been read.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
+ >>> a_itor.shape
+ (3, 4, 5, 6)
+
+ Now we can iterate over ``a_itor``, and it will return arrays of size
+ two. Since `buf_size` was smaller than any dimension, the first
+ dimension will be iterated over first:
+
+ >>> for subarr in a_itor:
+ ... if not subarr.all():
+ ... print subarr, subarr.shape
+ ...
+ [[[[0 1]]]] (1, 1, 1, 2)
"""
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 503d43647..7f62a81f9 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -180,8 +180,8 @@ def pmt(rate, nper, pv, fv=0, when='end'):
Pre-Draft 12. Organization for the Advancement of Structured Information
Standards (OASIS). Billerica, MA, USA. [ODT Document].
Available:
- http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
- OpenDocument-formula-20090508.odt
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
Examples
--------
@@ -469,13 +469,22 @@ def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
Notes
-----
- The rate of interest ``rate`` is computed by solving the equation::
+ The rate of interest is computed by iteratively solving the
+ (non-linear) equation::
fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0
- or, if ``rate = 0``::
+ for ``rate``.
- fv + pv + pmt * nper = 0
+ References
+ ----------
+ Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document
+ Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated
+ Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12.
+ Organization for the Advancement of Structured Information Standards
+ (OASIS). Billerica, MA, USA. [ODT Document]. Available:
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
"""
when = _convert_when(when)
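The new `rate` notes give the non-linear equation that is solved iteratively for ``rate``. A rough illustration of that idea using a plain Newton iteration with a numerical derivative (this is only a sketch of the stated equation, not the solver `rate` actually uses; `rate` exposes its own `guess`, `tol` and `maxiter` parameters as shown in the signature)::

    import numpy as np

    def residual(rate, nper, pmt, pv, fv=0.0, when=0):
        # The documented equation, equal to zero at the sought rate.
        return (fv + pv*(1 + rate)**nper
                + pmt*(1 + rate*when)/rate * ((1 + rate)**nper - 1))

    # Newton iteration with a numerical derivative, starting from the
    # same default guess of 0.10 that np.rate uses.
    r, h = 0.10, 1e-7
    for _ in range(50):
        f = residual(r, nper=10, pmt=-150, pv=1000)
        fp = (residual(r + h, nper=10, pmt=-150, pv=1000) - f) / h
        r -= f / fp

    print(r)                             # root of the equation above
    print(np.rate(10, -150, 1000, 0))    # should agree closely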
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 28444613c..3c5fe3209 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -2,57 +2,136 @@
Define a simple format for saving numpy arrays to disk with the full
information about them.
-WARNING: Due to limitations in the interpretation of structured dtypes, dtypes
-with fields with empty names will have the names replaced by 'f0', 'f1', etc.
-Such arrays will not round-trip through the format entirely accurately. The
-data is intact; only the field names will differ. We are working on a fix for
-this. This fix will not require a change in the file format. The arrays with
-such structures can still be saved and restored, and the correct dtype may be
-restored by using the `loadedarray.view(correct_dtype)` method.
+The ``.npy`` format is the standard binary file format in NumPy for
+persisting a *single* arbitrary NumPy array on disk. The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+ object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+ including shape and dtype on a machine of a different
+ architecture. Both little-endian and big-endian arrays are
+ supported, and a file with little-endian numbers will yield
+ a little-endian array on any machine reading the file. The
+ types are described in terms of their actual sizes. For example,
+ if a machine with a 64-bit C "long int" writes out an array with
+ "long ints", a reading machine with 32-bit C "long ints" will yield
+ an array with 64-bit integers.
+
+- Is straightforward to reverse engineer. Datasets often live longer than
+ the programs that created them. A competent developer should be
+ able to create a solution in his preferred programming language to
+ read most ``.npy`` files that he has been given without much
+ documentation.
+
+- Allows memory-mapping of the data. See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+ Python objects. Files with object arrays are not directly mmapable, but
+ can be read and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+ Subclasses will be accepted for writing, but only the array data will
+ be written out. A regular numpy.ndarray object will be created
+ upon reading the file.
+
+.. warning::
+
+ Due to limitations in the interpretation of structured dtypes, dtypes
+ with fields with empty names will have the names replaced by 'f0', 'f1',
+ etc. Such arrays will not round-trip through the format entirely
+ accurately. The data is intact; only the field names will differ. We are
+ working on a fix for this. This fix will not require a change in the
+ file format. The arrays with such structures can still be saved and
+ restored, and the correct dtype may be restored by using the
+ ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.io` will still
+be able to read and write Version 1.0 files.
Format Version 1.0
------------------
-The first 6 bytes are a magic string: exactly "\\\\x93NUMPY".
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
The next 1 byte is an unsigned byte: the major version number of the file
-format, e.g. \\\\x01.
+format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
-format, e.g. \\\\x00. Note: the version of the file format is not tied to the
-version of the numpy package.
+format, e.g. ``\\x00``. Note: the version of the file format is not tied
+to the version of the numpy package.
-The next 2 bytes form a little-endian unsigned short int: the length of the
-header data HEADER_LEN.
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
-The next HEADER_LEN bytes form the header data describing the array's format.
-It is an ASCII string which contains a Python literal expression of a
-dictionary. It is terminated by a newline ('\\\\n') and padded with spaces
-('\\\\x20') to make the total length of the magic string + 4 + HEADER_LEN be
-evenly divisible by 16 for alignment purposes.
+The next HEADER_LEN bytes form the header data describing the array's
+format. It is an ASCII string which contains a Python literal expression
+of a dictionary. It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total length of
+``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment
+purposes.
The dictionary contains three keys:
"descr" : dtype.descr
- An object that can be passed as an argument to the numpy.dtype()
- constructor to create the array's dtype.
+ An object that can be passed as an argument to the `numpy.dtype`
+ constructor to create the array's dtype.
"fortran_order" : bool
- Whether the array data is Fortran-contiguous or not. Since
- Fortran-contiguous arrays are a common form of non-C-contiguity, we
- allow them to be written directly to disk for efficiency.
+ Whether the array data is Fortran-contiguous or not. Since
+ Fortran-contiguous arrays are a common form of non-C-contiguity,
+ we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
- The shape of the array.
-
-For repeatability and readability, the dictionary keys are sorted in alphabetic
-order. This is for convenience only. A writer SHOULD implement this if
-possible. A reader MUST NOT depend on this.
-
-Following the header comes the array data. If the dtype contains Python objects
-(i.e. dtype.hasobject is True), then the data is a Python pickle of the array.
-Otherwise the data is the contiguous (either C- or Fortran-, depending on
-fortran_order) bytes of the array. Consumers can figure out the number of bytes
-by multiplying the number of elements given by the shape (noting that shape=()
-means there is 1 element) by dtype.itemsize.
+ The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order. This is for convenience only. A writer SHOULD implement
+this if possible. A reader MUST NOT depend on this.
+
+Following the header comes the array data. If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array. Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
+Notes
+-----
+The ``.npy`` format, including reasons for creating it and a comparison of
+alternatives, is described fully in the "npy-format" NEP.
"""
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 6956bf366..755229417 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -45,7 +45,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None):
Parameters
----------
a : array_like
- Input data.
+ Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
@@ -62,8 +62,8 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None):
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
- histogram values will often not be equal to 1; it is not a
- probability *mass* function.
+ histogram values will not be equal to 1 unless bins of unity
+ width are chosen; it is not a probability *mass* function.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
@@ -91,7 +91,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None):
See Also
--------
- histogramdd
+ histogramdd, bincount, searchsorted
Notes
-----
@@ -106,8 +106,22 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None):
Examples
--------
- >>> np.histogram([1,2,1], bins=[0,1,2,3])
+ >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
+ >>> np.histogram(np.arange(4), bins=np.arange(5), normed=True)
+ (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
+ >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
+ (array([1, 4, 1]), array([0, 1, 2, 3]))
+
+ >>> a = np.arange(5)
+ >>> hist, bin_edges = np.histogram(a, normed=True)
+ >>> hist
+ array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
+ >>> hist.sum()
+ 2.4999999999999996
+ >>> np.sum(hist*np.diff(bin_edges))
+ 1.0
"""
# Old behavior
@@ -401,27 +415,30 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
def average(a, axis=None, weights=None, returned=False):
"""
- Return the weighted average of array over the specified axis.
+ Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
- Data to be averaged.
+ Array containing data to be averaged. If `a` is not an array, a
+ conversion is attempted.
axis : int, optional
- Axis along which to average `a`. If `None`, averaging is done over the
- entire array irrespective of its shape.
+ Axis along which to average `a`. If `None`, averaging is done over
+ the flattened array.
weights : array_like, optional
- The importance that each datum has in the computation of the average.
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
- is returned, otherwise only the average is returned. Note that
- if `weights=None`, `sum_of_weights` is equivalent to the number of
+ is returned, otherwise only the average is returned.
+ If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
+
Returns
-------
average, [sum_of_weights] : {array_type, double}
@@ -442,6 +459,8 @@ def average(a, axis=None, weights=None, returned=False):
See Also
--------
+ mean
+
ma.average : average for masked arrays
Examples
@@ -454,6 +473,18 @@ def average(a, axis=None, weights=None, returned=False):
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
+ >>> data = np.arange(6).reshape((3,2))
+ >>> data
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+ >>> np.average(data, axis=1, weights=[1./4, 3./4])
+ array([ 0.75, 2.75, 4.75])
+ >>> np.average(data, weights=[1./4, 3./4])
+ Traceback (most recent call last):
+ ...
+ TypeError: Axis must be specified when shapes of a and weights differ.
+
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
@@ -1464,12 +1495,35 @@ def nanmin(a, axis=None):
def nanargmin(a, axis=None):
"""
- Return indices of the minimum values along an axis, ignoring NaNs.
+ Return indices of the minimum values over an axis, ignoring NaNs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
See Also
--------
- nanargmax : corresponding function for maxima; see for details.
+ argmin, nanargmax
+
+ Examples
+ --------
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmin(a)
+ 0
+ >>> np.nanargmin(a)
+ 2
+ >>> np.nanargmin(a, axis=0)
+ array([1, 1])
+ >>> np.nanargmin(a, axis=1)
+ array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
@@ -1560,26 +1614,43 @@ def nanargmax(a, axis=None):
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
- array([1, 1])
- >>> np.nanargmax(a, axis=1)
array([1, 0])
+ >>> np.nanargmax(a, axis=1)
+ array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
- Display a message on a device
+ Display a message on a device.
Parameters
----------
- mesg : string
+ mesg : str
Message to display.
- device : device object with 'write' method
- Device to write message. If None, defaults to ``sys.stdout`` which is
- very similar to ``print``.
+ device : object
+ Device to write message. If None, defaults to ``sys.stdout`` which is
+ very similar to ``print``. `device` needs to have ``write()`` and
+ ``flush()`` methods.
linefeed : bool, optional
- Option whether to print a line feed or not. Defaults to True.
+ Option whether to print a line feed or not. Defaults to True.
+
+ Raises
+ ------
+ AttributeError
+ If `device` does not have a ``write()`` or ``flush()`` method.
+
+ Examples
+ --------
+ Besides ``sys.stdout``, a file-like object can also be used as it has
+ both required methods:
+
+ >>> from StringIO import StringIO
+ >>> buf = StringIO()
+ >>> np.disp('"Display" in a file', device=buf)
+ >>> buf.getvalue()
+ '"Display" in a file\\n'
"""
if device is None:
@@ -1873,6 +1944,30 @@ def corrcoef(x, y=None, rowvar=1, bias=0):
The values of P are between -1 and 1.
+ Parameters
+ ----------
+ m : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `m` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same
+ shape as `m`.
+ rowvar : int, optional
+ If `rowvar` is non-zero (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : int, optional
+ Default normalization is by ``(N-1)``, where ``N`` is the number of
+ observations given (unbiased estimate). If `bias` is 1, then
+ normalization is by ``N``.
+
+ Returns
+ -------
+ out : ndarray
+ The correlation coefficient matrix of the variables.
+
See Also
--------
cov : Covariance matrix
@@ -2219,19 +2314,17 @@ def hamming(M):
Examples
--------
- >>> from numpy import hamming
- >>> hamming(12)
+ >>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
- >>> from numpy import clip, log10, array, hamming, linspace
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
- >>> window = hamming(51)
+ >>> window = np.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
@@ -2240,10 +2333,10 @@ def hamming(M):
>>> plt.figure()
>>> A = fft(window, 2048) / 25.5
- >>> mag = abs(fftshift(A))
- >>> freq = linspace(-0.5,0.5,len(A))
- >>> response = 20*log10(mag)
- >>> response = clip(response,-100,100)
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> response = 20 * np.log10(mag)
+ >>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
>>> plt.title("Frequency response of Hamming window")
>>> plt.ylabel("Magnitude [dB]")
@@ -2341,7 +2434,9 @@ def i0(x):
"""
Modified Bessel function of the first kind, order 0.
- Usually denoted :math:`I_0`.
+ Usually denoted :math:`I_0`. This function does broadcast, but will *not*
+ "up-cast" int dtype arguments unless accompanied by at least one float or
+ complex dtype argument (see Raises below).
Parameters
----------
@@ -2350,19 +2445,36 @@ def i0(x):
Returns
-------
- out : ndarray, shape z.shape, dtype z.dtype
- The modified Bessel function evaluated at the elements of `x`.
+ out : ndarray, shape = x.shape, dtype = x.dtype
+ The modified Bessel function evaluated at each of the elements of `x`.
+
+ Raises
+ ------
+ TypeError: array cannot be safely cast to required type
+ If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
+ Notes
+ -----
+ We use the algorithm published by Clenshaw [1]_ and referenced by
+ Abramowitz and Stegun [2]_, for which the function domain is partitioned
+ into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
+ expansions are employed in each interval. Relative error on the domain
+ [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
+ with an rms of 1.4e-16 (n = 30000).
+
References
----------
- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
- 10th printing, 1964, pp. 374. http://www.math.sfu.ca/~cbm/aands/
- .. [2] Wikipedia, "Bessel function",
- http://en.wikipedia.org/wiki/Bessel_function
+ .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions," in
+ *National Physical Laboratory Mathematical Tables*, vol. 5, London:
+ Her Majesty's Stationery Office, 1962.
+ .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
+ Functions*, 10th printing, New York: Dover, 1964, pp. 379.
+ http://www.math.sfu.ca/~cbm/aands/page_379.htm
+ .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
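A minimal sketch of the integer up-cast behaviour noted in the Raises section above (assuming ``import numpy as np``; the exact exception text may differ between versions):

    import numpy as np

    np.i0(0.)                        # float input is fine; i0(0) == 1
    np.i0(np.array([0., 1., 2.]))    # evaluated element-wise

    try:
        np.i0(np.array([1, 2, 3]))   # purely int dtype input
    except TypeError:
        pass                         # "array cannot be safely cast to required type"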
@@ -2722,6 +2834,10 @@ def trapz(y, x=None, dx=1.0, axis=-1):
out : float
Definite integral as approximated by trapezoidal rule.
+ See Also
+ --------
+ sum, cumsum
+
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will
@@ -2734,14 +2850,25 @@ def trapz(y, x=None, dx=1.0, axis=-1):
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
- .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+ .. [2] Illustration image:
+ http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
- >>> 4.0
- >>> np.trapz([1,2,3], [4,6,8])
- >>> 8.0
+ 4.0
+ >>> np.trapz([1,2,3], x=[4,6,8])
+ 8.0
+ >>> np.trapz([1,2,3], dx=2)
+ 8.0
+ >>> a = np.arange(6).reshape(2, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.trapz(a, axis=0)
+ array([ 1.5, 2.5, 3.5])
+ >>> np.trapz(a, axis=1)
+ array([ 2., 8.])
"""
y = asarray(y)
@@ -2985,8 +3112,9 @@ def insert(arr, obj, values, axis=None):
----------
arr : array_like
Input array.
- obj : int, slice, or array of ints
- Insert `values` before `obj` indices.
+ obj : int, slice or sequence of ints
+ Object that defines the index or indices before which `values` is
+ inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
@@ -3029,9 +3157,15 @@ def insert(arr, obj, values, axis=None):
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
- >>> np.insert(b, [2, 2], [7.13, False])
+ >>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
+ >>> x = np.arange(8).reshape(2, 4)
+ >>> idx = (1, 3)
+ >>> np.insert(x, idx, 999, axis=1)
+ array([[ 0, 999, 1, 2, 999, 3],
+ [ 4, 999, 5, 6, 999, 7]])
+
"""
wrap = None
if type(arr) is not ndarray:
@@ -3140,6 +3274,10 @@ def append(arr, values, axis=None):
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index e46c9b763..450ac4df8 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -91,7 +91,7 @@ def ix_(*args):
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
- ``[a[1,2] a[1,5] a[3,2] a[3,5]]``.
+ ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
@@ -99,7 +99,7 @@ def ix_(*args):
Returns
-------
- out : ndarrays
+ out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
@@ -110,12 +110,15 @@ def ix_(*args):
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
+ >>> a
+ array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
- >>> print ixgrid[0].shape, ixgrid[1].shape
- (2, 1) (1, 2)
+ >>> ixgrid[0].shape, ixgrid[1].shape
+ ((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
@@ -140,7 +143,7 @@ class nd_grid(object):
"""
Construct a multi-dimensional "meshgrid".
- grid = nd_grid() creates an instance which will return a mesh-grid
+ ``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
@@ -150,9 +153,25 @@ class nd_grid(object):
number of points to create between the start and stop values, where
the stop value **is inclusive**.
- If instantiated with an argument of sparse=True, the mesh-grid is
+ If instantiated with an argument of ``sparse=True``, the mesh-grid is
open (or not fleshed out) so that only one-dimension of each returned
- argument is greater than 1
+ argument is greater than 1.
+
+ Parameters
+ ----------
+ sparse : bool, optional
+ Whether the grid is sparse or not. Default is False.
+
+ Notes
+ -----
+ Two instances of `nd_grid` are made available in the NumPy namespace,
+ `mgrid` and `ogrid`::
+
+ mgrid = nd_grid(sparse=False)
+ ogrid = nd_grid(sparse=True)
+
+ Users should use these pre-defined instances instead of using `nd_grid`
+ directly.
Examples
--------
@@ -170,6 +189,7 @@ class nd_grid(object):
[0, 1, 2, 3, 4]]])
>>> mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
+
>>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
>>> ogrid[0:5,0:5]
[array([[0],
@@ -640,11 +660,44 @@ class IndexExpression(object):
"""
A nicer way to build up index tuples for arrays.
+ .. note::
+ Use one of the two predefined instances `index_exp` or `s_`
+ rather than directly using `IndexExpression`.
+
For any index combination, including slicing and axis insertion,
- 'a[indices]' is the same as 'a[index_exp[indices]]' for any
- array 'a'. However, 'index_exp[indices]' can be used anywhere
+ ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
+ array `a`. However, ``np.index_exp[indices]`` can be used anywhere
in Python code and returns a tuple of slice objects that can be
used in the construction of complex index expressions.
+
+ Parameters
+ ----------
+ maketuple : bool
+ If True, always returns a tuple.
+
+ See Also
+ --------
+ index_exp : Predefined instance that always returns a tuple:
+ ``index_exp = IndexExpression(maketuple=True)``.
+ s_ : Predefined instance without tuple conversion:
+ ``s_ = IndexExpression(maketuple=False)``.
+
+ Notes
+ -----
+ You can do all this with `slice()` plus a few special objects,
+ but there's a lot to remember and this version is simpler because
+ it uses the standard array indexing syntax.
+
+ Examples
+ --------
+ >>> np.s_[2::2]
+ slice(2, None, 2)
+ >>> np.index_exp[2::2]
+ (slice(2, None, 2),)
+
+ >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
+ array([2, 4])
+
"""
maxint = sys.maxint
def __init__(self, maketuple):
@@ -674,21 +727,16 @@ s_ = IndexExpression(maketuple=False)
# applicable to N-dimensions.
def fill_diagonal(a, val):
- """Fill the main diagonal of the given array of any dimensionality.
-
- For an array with ndim > 2, the diagonal is the list of locations with
- indices a[i,i,...,i], all identical.
-
- This function modifies the input array in-place, it does not return a
- value.
+ """
+ Fill the main diagonal of the given array of any dimensionality.
- This functionality can be obtained via diag_indices(), but internally this
- version uses a much faster implementation that never constructs the indices
- and uses simple slicing.
+ For an array `a` with ``a.ndim > 2``, the diagonal is the list of
+ locations with indices ``a[i, i, ..., i]``, i.e. with all indices
+ identical. This function modifies the input array in-place; it does
+ not return a value.
Parameters
----------
- a : array, at least 2-dimensional.
+ a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
@@ -703,29 +751,35 @@ def fill_diagonal(a, val):
-----
.. versionadded:: 1.4.0
+ This functionality can be obtained via `diag_indices`, but internally
+ this version uses a much faster implementation that never constructs the
+ indices and uses simple slicing.
+
Examples
--------
- >>> a = zeros((3,3),int)
- >>> fill_diagonal(a,5)
+ >>> a = np.zeros((3, 3), int)
+ >>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
- The same function can operate on a 4-d array:
- >>> a = zeros((3,3,3,3),int)
- >>> fill_diagonal(a,4)
+ The same function can operate on a 4-D array:
+
+ >>> a = np.zeros((3, 3, 3, 3), int)
+ >>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
- >>> a[0,0]
+
+ >>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
- >>> a[1,1]
+ >>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
- >>> a[2,2]
+ >>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
@@ -749,12 +803,14 @@ def fill_diagonal(a, val):
def diag_indices(n, ndim=2):
- """Return the indices to access the main diagonal of an array.
+ """
+ Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
- diagonal of an array with ndim (>=2) dimensions and shape (n,n,...,n). For
- ndim=2 this is the usual diagonal, for ndim>2 this is the set of indices
- to access A[i,i,...,i] for i=[0..n-1].
+ diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+ (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+ ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+ for ``i = [0..n-1]``.
Parameters
----------
@@ -765,42 +821,47 @@ def diag_indices(n, ndim=2):
ndim : int, optional
The number of dimensions.
- Notes
- -----
- .. versionadded:: 1.4.0
-
See also
--------
diag_indices_from
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
Examples
--------
- Create a set of indices to access the diagonal of a (4,4) array:
- >>> di = diag_indices(4)
+ Create a set of indices to access the diagonal of a (4, 4) array:
- >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
+ >>> di = np.diag_indices(4)
+ >>> di
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+ >>> a = np.arange(16).reshape(4, 4)
>>> a
- array([[ 1, 2, 3, 4],
- [ 5, 6, 7, 8],
- [ 9, 10, 11, 12],
- [13, 14, 15, 16]])
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
>>> a[di] = 100
>>> a
- array([[100, 2, 3, 4],
- [ 5, 100, 7, 8],
- [ 9, 10, 100, 12],
- [ 13, 14, 15, 100]])
+ array([[100, 1, 2, 3],
+ [ 4, 100, 6, 7],
+ [ 8, 9, 100, 11],
+ [ 12, 13, 14, 100]])
+
+ Now, we create indices to manipulate a 3-D array:
- Now, we create indices to manipulate a 3-d array:
- >>> d3 = diag_indices(2,3)
+ >>> d3 = np.diag_indices(2, 3)
+ >>> d3
+ (array([0, 1]), array([0, 1]), array([0, 1]))
- And use it to set the diagonal of a zeros array to 1:
- >>> a = zeros((2,2,2),int)
+ And use it to set the diagonal of an array of zeros to 1:
+
+ >>> a = np.zeros((2, 2, 2), dtype=np.int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
-
[[0, 0],
[0, 1]]])
@@ -810,13 +871,18 @@ def diag_indices(n, ndim=2):
def diag_indices_from(arr):
- """Return the indices to access the main diagonal of an n-dimensional array.
+ """
+ Return the indices to access the main diagonal of an n-dimensional array.
- See diag_indices() for full details.
+ See `diag_indices` for full details.
Parameters
----------
- arr : array, at least 2-d
+ arr : array, at least 2-D
+
+ See Also
+ --------
+ diag_indices
Notes
-----
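A minimal sketch of `diag_indices_from` (assuming ``import numpy as np``); it simply infers `n` and `ndim` from an existing array instead of taking them explicitly:

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    idx = np.diag_indices_from(a)   # equivalent to np.diag_indices(3)
    a[idx] = 0                      # zeroes the main diagonal in-place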
diff --git a/numpy/lib/io.py b/numpy/lib/io.py
index 3a962c7e1..b79286f30 100644
--- a/numpy/lib/io.py
+++ b/numpy/lib/io.py
@@ -57,8 +57,27 @@ def seek_gzip_factory(f):
return f
class BagObj(object):
- """A simple class that converts attribute lookups to
- getitems on the class passed in.
+ """
+ BagObj(obj)
+
+ Convert attribute lookups to getitems on the object passed in.
+
+ Parameters
+ ----------
+ obj : class instance
+ Object on which attribute lookup is performed.
+
+ Examples
+ --------
+ >>> class BagDemo(object):
+ ... def __getitem__(self, key):
+ ... return key
+ ...
+ >>> demo_obj = BagDemo()
+ >>> bagobj = np.lib.io.BagObj(demo_obj)
+ >>> bagobj.some_item
+ 'some_item'
+
"""
def __init__(self, obj):
self._obj = obj
@@ -69,14 +88,57 @@ class BagObj(object):
raise AttributeError, key
class NpzFile(object):
- """A dictionary-like object with lazy-loading of files in the zipped
+ """
+ NpzFile(fid)
+
+ A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
+ `NpzFile` is used to load files in the NumPy ``.npz`` data archive
+ format. It assumes that files in the archive have a ".npy" extension;
+ other files are ignored.
+
The arrays and file strings are lazily loaded on either
- getitem access using obj['key'] or attribute lookup using obj.f.key
+ getitem access using ``obj['key']`` or attribute lookup using
+ ``obj.f.key``. A list of all files (without ".npy" extensions) can
+ be obtained with ``obj.files`` and the ZipFile object itself using
+ ``obj.zip``.
+
+ Attributes
+ ----------
+ files : list of str
+ List of all files in the archive with a ".npy" extension.
+ zip : ZipFile instance
+ The ZipFile object initialized with the zipped archive.
+ f : BagObj instance
+ An object on which attribute lookup can be performed as an alternative
+ to getitem access on the `NpzFile` instance itself.
+
+ Parameters
+ ----------
+ fid : file or str
+ The zipped archive to open. This is either a file-like object
+ or a string containing the path to the archive.
+
+ Examples
+ --------
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+ >>> np.savez(outfile, x=x, y=y)
+ >>> outfile.seek(0)
+
+ >>> npz = np.load(outfile)
+ >>> isinstance(npz, np.lib.io.NpzFile)
+ True
+ >>> npz.files
+ ['y', 'x']
+ >>> npz['x'] # getitem access
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> npz.f.x # attribute lookup
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- A list of all files (without .npy) extensions can be obtained
- with .files and the ZipFile object itself using .zip
"""
def __init__(self, fid):
# Import is postponed to here since zipfile depends on gzip, an optional
@@ -123,16 +185,23 @@ class NpzFile(object):
return iter(self.files)
def items(self):
+ """
+ Return a list of tuples, with each tuple (filename, array in file).
+
+ """
return [(f, self[f]) for f in self.files]
def iteritems(self):
+ """Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
+ """Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
+ """Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
@@ -241,9 +310,13 @@ def save(file, arr):
See Also
--------
- savez : Save several arrays into a .npz compressed archive
+ savez : Save several arrays into a ``.npz`` compressed archive
savetxt, load
+ Notes
+ -----
+ For a description of the ``.npy`` format, see `format`.
+
Examples
--------
>>> from tempfile import TemporaryFile
@@ -269,7 +342,7 @@ def save(file, arr):
def savez(file, *args, **kwds):
"""
- Save several arrays into a single, compressed file with extension ".npz"
+ Save several arrays into a single, compressed file in ``.npz`` format.
If keyword arguments are given, the names for variables assigned to the
keywords are the keyword names (not the variable names in the caller).
@@ -278,33 +351,57 @@ def savez(file, *args, **kwds):
Parameters
----------
- file : Either the filename (string) or an open file (file-like object)
+ file : str or file
+ Either the file name (string) or an open file (file-like object).
If file is a string, it names the output file. ".npz" will be appended
- if it is not already there.
+ to the file name if it is not already there.
args : Arguments
Any function arguments other than the file name are variables to save.
- Since it is not possible for Python to know their names outside the
- savez function, they will be saved with names "arr_0", "arr_1", and so
- on. These arguments can be any expression.
+ Since it is not possible for Python to know their names outside
+ `savez`, they will be saved with names "arr_0", "arr_1", and so on.
+ These arguments can be any expression.
kwds : Keyword arguments
All keyword=value pairs cause the value to be saved with the name of
the keyword.
See Also
--------
- save : Save a single array to a binary file in NumPy format
- savetxt : Save an array to a file as plain text
+ save : Save a single array to a binary file in NumPy format.
+ savetxt : Save an array to a file as plain text.
Notes
-----
- The .npz file format is a zipped archive of files named after the variables
- they contain. Each file contains one variable in .npy format.
+ The ``.npz`` file format is a zipped archive of files named after the
+ variables they contain. Each file contains one variable in ``.npy``
+ format. For a description of the ``.npy`` format, see `format`.
Examples
--------
- >>> x = np.random.random((3, 3))
- >>> y = np.zeros((3, 2))
- >>> np.savez('data', x=x, y=y)
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+
+ Using `savez` with \\*args, the arrays are saved with default names.
+
+ >>> np.savez(outfile, x, y)
+ >>> outfile.seek(0) # only necessary in this example (with tempfile)
+ >>> npz = np.load(outfile)
+ >>> npz.files
+ ['arr_1', 'arr_0']
+ >>> npz['arr_0']
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ Using `savez` with \\*\\*kwds, the arrays are saved with the keyword names.
+
+ >>> outfile = TemporaryFile()
+ >>> np.savez(outfile, x=x, y=y)
+ >>> outfile.seek(0)
+ >>> npz = np.load(outfile)
+ >>> npz.files
+ ['y', 'x']
+ >>> npz['x']
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
@@ -373,33 +470,33 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
Parameters
----------
- fname : file or string
+ fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
- dtype : data-type
+ dtype : dtype, optional
Data type of the resulting array. If this is a record data-type,
the resulting array will be 1-dimensional, and each row will be
interpreted as an element of the array. In this case, the number
of columns used must match the number of fields in the data-type.
- comments : string, optional
+ comments : str, optional
The character used to indicate the start of a comment.
- delimiter : string, optional
+ delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
- converters : {}
+ converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``.
- skiprows : int
+ skiprows : int, optional
Skip the first `skiprows` lines.
- usecols : sequence
+ usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
- unpack : bool
+ unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
- unpacked using ``x, y, z = loadtxt(...)``
+ unpacked using ``x, y, z = loadtxt(...)``. Default is False.
Returns
-------
@@ -408,6 +505,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
See Also
--------
+ load, fromstring, fromregex
scipy.io.loadmat : reads Matlab(R) data files
Examples
@@ -425,7 +523,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
- >>> x,y = np.loadtxt(c, delimiter=',', usecols=(0,2), unpack=True)
+ >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
@@ -575,8 +673,8 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '):
See Also
--------
- save : Save an array to a binary file in NumPy format
- savez : Save several arrays into an .npz compressed archive
+ save : Save an array to a binary file in NumPy ``.npy`` format
+ savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
@@ -686,10 +784,11 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '):
import re
def fromregex(file, regexp, dtype):
"""
- Construct an array from a text file, using regular-expressions parsing.
+ Construct an array from a text file, using regular expression parsing.
- Array is constructed from all matches of the regular expression
- in the file. Groups in the regular expression are converted to fields.
+ The returned array is always a structured array, and is constructed from
+ all matches of the regular expression in the file. Groups in the regular
+ expression are converted to fields of the structured array.
Parameters
----------
@@ -698,18 +797,44 @@ def fromregex(file, regexp, dtype):
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
- dtype : dtype or dtype list
- Dtype for the structured array
+ dtype : dtype or list of dtypes
+ Dtype for the structured array.
+
+ Returns
+ -------
+ output : ndarray
+ The output array, containing the part of the content of `file` that
+ was matched by `regexp`. `output` is always a structured array.
+
+ Raises
+ ------
+ TypeError
+ When `dtype` is not a valid dtype for a structured array.
+
+ See Also
+ --------
+ fromstring, loadtxt
+
+ Notes
+ -----
+ Dtypes for structured arrays can be specified in several forms, but all
+ forms specify at least the data type and field name. For details see
+ `doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
- >>> np.fromregex('test.dat', r"(\\d+)\\s+(...)",
- ... [('num', np.int64), ('key', 'S3')])
+
+ >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
+ >>> output = np.fromregex('test.dat', regexp,
+ ... [('num', np.int64), ('key', 'S3')])
+ >>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
+ >>> output['num']
+ array([1312, 1534, 444], dtype=int64)
"""
if not hasattr(file, "read"):
@@ -746,18 +871,18 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
names=None, excludelist=None, deletechars=None,
case_sensitive=True, unpack=None, usemask=False, loose=True):
"""
- Load data from a text file.
+ Load data from a text file, with missing values handled as specified.
- Each line past the first `skiprows` ones is split at the `delimiter`
+ Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
- fname : {file, string}
+ fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
- dtype : dtype
- Data type of the resulting array. If this is a flexible data-type,
+ dtype : dtype, optional
+ Data type of the resulting array. If this is a structured data type,
the resulting array will be 1-dimensional, and each row will be
interpreted as an element of the array. In this case, the number
of columns used must match the number of fields in the data-type,
@@ -765,56 +890,58 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
of the dtype.
If None, the dtypes will be determined by the contents of each
column, individually.
- comments : string, optional
+ comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
- delimiter : string, optional
+ delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
- whitespace act as delimiter.
+ whitespace characters act as the delimiter. An integer or sequence of
+ integers can also be provided as the width(s) of each field.
skiprows : int, optional
Numbers of lines to skip at the beginning of the file.
- converters : {None, dictionary}, optional
+ converters : dict or None, optional
A dictionary mapping column number to a function that will convert
values in the column to a number. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``.
- missing : string, optional
+ missing : str, optional
A string representing a missing value, irrespective of the column where
it appears (e.g., `'missing'` or `'unused'`).
- missing_values : {None, dictionary}, optional
+ missing_values : dict or None, optional
A dictionary mapping a column number to a string indicating whether the
corresponding field should be masked.
- usecols : {None, sequence}, optional
+ usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
- ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
- names : {None, True, string, sequence}, optional
+ ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+ names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
If `names` is a sequence or a single-string of comma-separated names,
- the names will be used to define the field names in a flexible dtype.
+ the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
- deletechars : string, optional
+ deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
case_sensitive : {True, False, 'upper', 'lower'}, optional
- If True, field names are case_sensitive.
+ If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
- If True, returns a masked array.
- If False, return a regular standard array.
+ If True, return a masked array.
+ If False, return a regular array.
Returns
-------
- out : MaskedArray
- Data read from the text file.
+ out : ndarray
+ Data read from the text file. If `usemask` is True, this is a
+ masked array.
See Also
--------
@@ -824,12 +951,53 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
- * When the variable are named (either by a flexible dtype or with `names`,
- there must not be any header in the file (else a :exc:ValueError
+ * When the variables are named (either by a flexible dtype or with `names`),
+ there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
+ Examples
+ --------
+ >>> from StringIO import StringIO
+ >>> import numpy as np
+
+ Comma delimited file with mixed dtype
+
+ >>> s = StringIO("1,1.3,abcde")
+ >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+ ... ('mystring','S5')], delimiter=",")
+ >>> data
+ array((1, 1.3, 'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+ Using dtype = None
+
+ >>> s.seek(0) # needed for StringIO example only
+ >>> data = np.genfromtxt(s, dtype=None,
+ ... names = ['myint','myfloat','mystring'], delimiter=",")
+ >>> data
+ array((1, 1.3, 'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+ Specifying dtype and names
+
+ >>> s.seek(0)
+ >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+ ... names=['myint','myfloat','mystring'], delimiter=",")
+ >>> data
+ array((1, 1.3, 'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+ An example with fixed-width columns
+
+ >>> s = StringIO("11.3abcde")
+ >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+ ... delimiter=[1,3,5])
+ >>> data
+ array((1, 1.3, 'abcde'),
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
+
"""
#
if usemask:
@@ -1114,15 +1282,15 @@ def ndfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
usecols=None, unpack=None, names=None,
excludelist=None, deletechars=None, case_sensitive=True,):
"""
- Load ASCII data stored in fname and returns a ndarray.
-
+ Load ASCII data stored in a file and return it as a single array.
+
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
-
+
See Also
--------
numpy.genfromtxt : generic function.
-
+
"""
kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter,
skiprows=skiprows, converters=converters,
@@ -1137,14 +1305,14 @@ def mafromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
usecols=None, unpack=None, names=None,
excludelist=None, deletechars=None, case_sensitive=True,):
"""
- Load ASCII data stored in fname and returns a MaskedArray.
-
- Complete description of all the optional input parameters is available in
- the docstring of the `genfromtxt` function.
-
+ Load ASCII data stored in a text file and return a masked array.
+
+ For a complete description of all the input parameters, see `genfromtxt`.
+
See Also
--------
- numpy.genfromtxt : generic function.
+ numpy.genfromtxt : generic function to load ASCII data.
+
"""
kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter,
skiprows=skiprows, converters=converters,
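A minimal sketch of `mafromtxt` (assuming ``import numpy as np`` and Python 2's `StringIO`; the treatment of the empty field follows the missing-value handling described for `genfromtxt`):

    from StringIO import StringIO
    import numpy as np

    s = StringIO("1,2,3\n4,,6")            # second row has an empty field
    arr = np.mafromtxt(s, delimiter=",")
    # arr should be a masked array, with the empty entry masked rather
    # than silently filled.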
@@ -1162,8 +1330,10 @@ def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0,
excludelist=None, deletechars=None, case_sensitive=True,
usemask=False):
"""
- Load ASCII data stored in fname and returns a standard recarray (if
- `usemask=False`) or a MaskedRecords (if `usemask=True`).
+ Load ASCII data from a file and return it in a record array.
+
+ If ``usemask=False`` a standard `recarray` is returned;
+ if ``usemask=True`` a `MaskedRecords` array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
@@ -1174,8 +1344,8 @@ def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0,
Notes
-----
- * by default, `dtype=None`, which means that the dtype of the output array
- will be determined from the data.
+ By default, `dtype` is None, which means that the data-type of the output
+ array will be determined from the data.
"""
kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter,
@@ -1199,15 +1369,18 @@ def recfromcsv(fname, dtype=None, comments='#', skiprows=0,
excludelist=None, deletechars=None, case_sensitive='lower',
usemask=False):
"""
- Load ASCII data stored in comma-separated file and returns a recarray (if
- `usemask=False`) or a MaskedRecords (if `usemask=True`).
-
- Complete description of all the optional input parameters is available in
- the docstring of the `genfromtxt` function.
-
+ Load ASCII data stored in a comma-separated file.
+
+ The returned array is a record array (if ``usemask=False``, see
+ `recarray`) or a masked record array (if ``usemask=True``,
+ see `ma.mrecords.MaskedRecords`).
+
+ For a complete description of all the input parameters, see `genfromtxt`.
+
See Also
--------
- numpy.genfromtxt : generic function
+ numpy.genfromtxt : generic function to load ASCII data.
+
"""
kwargs = dict(dtype=dtype, comments=comments, delimiter=",",
skiprows=skiprows, converters=converters,
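A minimal sketch of `recfromcsv` (assuming ``import numpy as np`` and Python 2's `StringIO`); with ``case_sensitive='lower'`` the header names should come back lower-cased:

    from StringIO import StringIO
    import numpy as np

    s = StringIO("A,B\n1,2.5\n3,4.0")
    rec = np.recfromcsv(s)          # dtype=None: field types inferred from the data
    # rec should be a record array whose fields are accessible as
    # rec.a and rec.b.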
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index bab32bf02..cf6cd65be 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -17,54 +17,99 @@ from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
- """Issued by polyfit when Vandermonde matrix is rank deficient.
+ """
+ Issued by `polyfit` when the Vandermonde matrix is rank deficient.
+
+ For more information, a way to suppress the warning, and an example of
+ `RankWarning` being issued, see `polyfit`.
+
"""
pass
def poly(seq_of_zeros):
"""
- Return polynomial coefficients given a sequence of roots.
-
- Calculate the coefficients of a polynomial given the zeros
- of the polynomial.
+ Find the coefficients of a polynomial with the given sequence of roots.
- If a square matrix is given, then the coefficients for
- characteristic equation of the matrix, defined by
- :math:`\\mathrm{det}(\\mathbf{A} - \\lambda \\mathbf{I})`,
- are returned.
+ Returns the coefficients of the polynomial whose leading coefficient
+ is one for the given sequence of zeros (multiple roots must be included
+ in the sequence as many times as their multiplicity; see Examples).
+ A square matrix (or array, which will be treated as a matrix) can also
+ be given, in which case the coefficients of the characteristic polynomial
+ of the matrix are returned.
Parameters
----------
- seq_of_zeros : ndarray
- A sequence of polynomial roots or a square matrix.
+ seq_of_zeros : array_like, shape (N,) or (N, N)
+ A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
- coefs : ndarray
- A sequence of polynomial coefficients representing the polynomial
+ c : ndarray
+ 1D array of polynomial coefficients from highest to lowest degree:
- :math:`\\mathrm{coefs}[0] x^{n-1} + \\mathrm{coefs}[1] x^{n-2} +
- ... + \\mathrm{coefs}[2] x + \\mathrm{coefs}[n]`
+ ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
+ where c[0] always equals 1.
+
+ Raises
+ ------
+ ValueError
+ If input is the wrong shape (the input must be a 1-D or square
+ 2-D array).
See Also
--------
- numpy.poly1d : A one-dimensional polynomial class.
- numpy.roots : Return the roots of the polynomial coefficients in p
- numpy.polyfit : Least squares polynomial fit
+ polyval : Evaluate a polynomial at a point.
+ roots : Return the roots of a polynomial.
+ polyfit : Least squares polynomial fit.
+ poly1d : A one-dimensional polynomial class.
+
+ Notes
+ -----
+ Specifying the roots of a polynomial still leaves one degree of
+ freedom, typically represented by an undetermined leading
+ coefficient. [1]_ In the case of this function, that coefficient -
+ the first one in the returned array - is always taken as one. (If
+ for some reason you have one other point, the only automatic way
+ presently to leverage that information is to use ``polyfit``.)
+
+ The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
+ matrix **A** is given by
+
+ :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
+
+ where **I** is the `n`-by-`n` identity matrix. [2]_
+
+ References
+ ----------
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
+ Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
+
+ .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
+ Academic Press, pg. 182, 1980.
Examples
--------
- Given a sequence of polynomial zeros,
+ Given a sequence of a polynomial's zeros:
- >>> b = np.roots([1, 3, 1, 5, 6])
- >>> np.poly(b)
- array([ 1., 3., 1., 5., 6.])
+ >>> np.poly((0, 0, 0)) # Multiple root example
+ array([1, 0, 0, 0]) # i.e., z**3 + 0*z**2 + 0*z + 0
+ >>> np.poly((-1./2, 0, 1./2))
+ array([ 1. , 0. , -0.25, 0. ]) # z**3 - z/4
+ >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
- Given a square matrix,
+ Given a square array object:
- >>> P = np.array([[19, 3], [-2, 26]])
+ >>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
- array([ 1., -45., 500.])
+ array([ 1. , 0. , 0.16666667])
+
+ Or a square matrix object:
+
+ >>> np.poly(np.matrix(P))
+ array([ 1. , 0. , 0.16666667])
+
+ Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
@@ -118,12 +163,32 @@ def roots(p):
ValueError:
When `p` cannot be converted to a rank-1 array.
+ See Also
+ --------
+ poly : Find the coefficients of a polynomial with
+ a given sequence of roots.
+ polyval : Evaluate a polynomial at a point.
+ polyfit : Least squares polynomial fit.
+ poly1d : A one-dimensional polynomial class.
+
+ Notes
+ -----
+
+ The algorithm relies on computing the eigenvalues of the
+ companion matrix [1]_.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Companion matrix",
+ http://en.wikipedia.org/wiki/Companion_matrix
+
Examples
--------
>>> coeff = [3.2, 2, 1]
- >>> print np.roots(coeff)
- [-0.3125+0.46351241j -0.3125-0.46351241j]
+ >>> np.roots(coeff)
+ array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
@@ -503,29 +568,32 @@ def polyval(p, x):
"""
Evaluate a polynomial at specific values.
- If p is of length N, this function returns the value:
+ If `p` is of length N, this function returns the value:
- p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1]
+ ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
- If x is a sequence then p(x) will be returned for all elements of x.
- If x is another polynomial then the composite polynomial p(x) will
- be returned.
+ If `x` is a sequence, then `p(x)` is returned for each element of `x`.
+ If `x` is another polynomial then the composite polynomial `p(x(t))`
+ is returned.
Parameters
----------
- p : {array_like, poly1d}
- 1D array of polynomial coefficients from highest degree to zero or an
+ p : array_like or poly1d object
+ 1D array of polynomial coefficients (including coefficients equal
+ to zero) from highest degree to the constant term, or an
instance of poly1d.
- x : {array_like, poly1d}
- A number, a 1D array of numbers, or an instance of poly1d.
+ x : array_like or poly1d object
+ A number, a 1D array of numbers, or an instance of poly1d, "at"
+ which to evaluate `p`.
Returns
-------
- values : {ndarray, poly1d}
- If either p or x is an instance of poly1d, then an instance of poly1d
- is returned, otherwise a 1D array is returned. In the case where x is
- a poly1d, the result is the composition of the two polynomials, i.e.,
- substitution is used.
+ values : ndarray or poly1d
+ If `x` is a poly1d instance, the result is the composition of the two
+ polynomials, i.e., `x` is "substituted" in `p` and the simplified
+ result is returned. In addition, the type of `x` - array_like or
+ poly1d - governs the type of the output: if `x` is array_like, so is
+ `values`; if `x` is a poly1d object, so is `values`.
See Also
--------
@@ -533,15 +601,26 @@ def polyval(p, x):
Notes
-----
- Horner's method is used to evaluate the polynomial. Even so, for
- polynomials of high degree the values may be inaccurate due to
+ Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
+ for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
+ References
+ ----------
+ .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
+ trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
+ Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
+ >>> np.polyval([3,0,1], np.poly1d(5))
+ poly1d([ 76.])
+ >>> np.polyval(np.poly1d([3,0,1]), 5)
+ 76
+ >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
+ poly1d([ 76.])
"""
p = NX.asarray(p)
@@ -556,26 +635,46 @@ def polyval(p, x):
def polyadd(a1, a2):
"""
- Returns sum of two polynomials.
+ Find the sum of two polynomials.
- Returns sum of polynomials; `a1` + `a2`. Input polynomials are
- represented as an array_like sequence of terms or a poly1d object.
+ Returns the polynomial resulting from the sum of two input polynomials.
+ Each input must be either a poly1d object or a 1D sequence of polynomial
+ coefficients, from highest to lowest degree.
Parameters
----------
- a1 : {array_like, poly1d}
- Polynomial as sequence of terms.
- a2 : {array_like, poly1d}
- Polynomial as sequence of terms.
+ a1, a2 : array_like or poly1d object
+ Input polynomials.
Returns
-------
- out : {ndarray, poly1d}
- Array representing the polynomial terms.
+ out : ndarray or poly1d object
+ The sum of the inputs. If either input is a poly1d object, then the
+ output is also a poly1d object. Otherwise, it is a 1D array of
+ polynomial coefficients from highest to lowest degree.
See Also
--------
- polyval, polydiv, polymul, polyadd
+ poly1d : A one-dimensional polynomial class.
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
+
+ Examples
+ --------
+ >>> np.polyadd([1, 2], [9, 5, 4])
+ array([9, 6, 6])
+
+ Using poly1d objects:
+
+ >>> p1 = np.poly1d([1, 2])
+ >>> p2 = np.poly1d([9, 5, 4])
+ >>> print p1
+ 1 x + 2
+ >>> print p2
+ 2
+ 9 x + 5 x + 4
+ >>> print np.polyadd(p1, p2)
+ 2
+ 9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
@@ -596,22 +695,21 @@ def polyadd(a1, a2):
def polysub(a1, a2):
"""
- Returns difference from subtraction of two polynomials input as sequences.
+ Difference (subtraction) of two polynomials.
- Returns difference of polynomials; `a1` - `a2`. Input polynomials are
- represented as an array_like sequence of terms or a poly1d object.
+ Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
+ `a1` and `a2` can be either array_like sequences of the polynomials'
+ coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
- a1 : {array_like, poly1d}
- Minuend polynomial as sequence of terms.
- a2 : {array_like, poly1d}
- Subtrahend polynomial as sequence of terms.
+ a1, a2 : array_like or poly1d
+ Minuend and subtrahend polynomials, respectively.
Returns
-------
- out : {ndarray, poly1d}
- Array representing the polynomial terms.
+ out : ndarray or poly1d
+ Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
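A minimal sketch of `polysub` (assuming ``import numpy as np``); coefficient sequences are aligned on the low-order end before subtracting:

    import numpy as np

    np.polysub([2, 10, -2], [3, 10, -4])      # -> array([-1,  0,  2])
    np.polysub(np.poly1d([1, 2]), [1, 0, 0])  # poly1d input -> poly1d output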
@@ -644,28 +742,50 @@ def polysub(a1, a2):
def polymul(a1, a2):
"""
- Returns product of two polynomials represented as sequences.
+ Find the product of two polynomials.
- The input arrays specify the polynomial terms in turn with a length equal
- to the polynomial degree plus 1.
+ Finds the polynomial resulting from the multiplication of the two input
+ polynomials. Each input must be either a poly1d object or a 1D sequence
+ of polynomial coefficients, from highest to lowest degree.
Parameters
----------
- a1 : {array_like, poly1d}
- First multiplier polynomial.
- a2 : {array_like, poly1d}
- Second multiplier polynomial.
+ a1, a2 : array_like or poly1d object
+ Input polynomials.
Returns
-------
- out : {ndarray, poly1d}
- Product of inputs.
+ out : ndarray or poly1d object
+ The polynomial resulting from the multiplication of the inputs. If
+ either inputs is a poly1d object, then the output is also a poly1d
+ object. Otherwise, it is a 1D array of polynomial coefficients from
+ highest to lowest degree.
See Also
--------
+ poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
+ Examples
+ --------
+ >>> np.polymul([1, 2, 3], [9, 5, 1])
+ array([ 9, 23, 38, 17, 3])
+
+ Using poly1d objects:
+
+ >>> p1 = np.poly1d([1, 2, 3])
+ >>> p2 = np.poly1d([9, 5, 1])
+ >>> print p1
+ 2
+ 1 x + 2 x + 3
+ >>> print p2
+ 2
+ 9 x + 5 x + 1
+ >>> print np.polymul(p1, p2)
+ 4 3 2
+ 9 x + 23 x + 38 x + 17 x + 3
+
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
@@ -678,28 +798,37 @@ def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
- The input arrays specify the polynomial terms in turn with a length equal
- to the polynomial degree plus 1.
+ The input arrays are the coefficients (including any coefficients
+ equal to zero) of the "numerator" (dividend) and "denominator"
+ (divisor) polynomials, respectively.
Parameters
----------
- u : {array_like, poly1d}
- Dividend polynomial.
- v : {array_like, poly1d}
- Divisor polynomial.
+ u : array_like or poly1d
+ Dividend polynomial's coefficients.
+
+ v : array_like or poly1d
+ Divisor polynomial's coefficients.
Returns
-------
q : ndarray
- Polynomial terms of quotient.
+ Coefficients, including those equal to zero, of the quotient.
r : ndarray
- Remainder of polynomial division.
+ Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
+ Notes
+ -----
+ Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
+ not equal `v.ndim`. In other words, all four possible combinations -
+ ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
+ ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
+
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
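A minimal sketch of the worked example above, and of the mixed-``ndim`` note (assuming ``import numpy as np``):

    import numpy as np

    q, r = np.polydiv([3.0, 5.0, 2.0], [2.0, 1.0])   # both inputs 1-D
    # q should be approximately [1.5, 1.75] and r approximately [0.25].
    q2, r2 = np.polydiv([3.0, 5.0, 2.0], 2.0)        # 1-D dividend, 0-D divisor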
@@ -762,15 +891,25 @@ class poly1d(object):
"""
A one-dimensional polynomial class.
+ A convenience class, used to encapsulate "natural" operations on
+ polynomials so that said operations may take on their customary
+ form in code (see Examples).
+
Parameters
----------
c_or_r : array_like
- Polynomial coefficients, in decreasing powers. For example,
- ``(1, 2, 3)`` implies :math:`x^2 + 2x + 3`. If `r` is set
- to True, these coefficients specify the polynomial roots
- (values where the polynomial evaluate to 0) instead.
+ The polynomial's coefficients, in decreasing powers, or if
+ the value of the second parameter is True, the polynomial's
+ roots (values where the polynomial evaluates to 0). For example,
+ ``poly1d([1, 2, 3])`` returns an object that represents
+ :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
+ one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
- If True, `c_or_r` gives the polynomial roots. Default is False.
+ If True, `c_or_r` specifies the polynomial's roots; the default
+ is False.
+ variable : str, optional
+ Changes the variable used when printing `p` from `x` to `variable`
+ (see Examples).
Examples
--------
@@ -781,7 +920,7 @@ class poly1d(object):
2
1 x + 2 x + 3
- Evaluate the polynomial:
+ Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
@@ -790,6 +929,8 @@ class poly1d(object):
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
+ >>> p(p.r)
+ array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # i.e., (0, 0)
Show the coefficients:
@@ -807,7 +948,7 @@ class poly1d(object):
>>> p[1]
2
- Polynomials can be added, substracted, multplied and divided
+ Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 0e1bafa91..a3fe1def7 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -218,29 +218,44 @@ def sqrt(x):
def log(x):
"""
- Return the natural logarithm of x.
+ Compute the natural logarithm of `x`.
- If x contains negative inputs, the answer is computed and returned in the
- complex domain.
+ Return the "principal value" (for a description of this, see `numpy.log`)
+ of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+ returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+ complex principal value is returned.
Parameters
----------
- x : array_like
+ x : array_like or scalar
+ The value(s) whose log is (are) required.
Returns
-------
- out : array_like
+ out : ndarray or scalar
+ The log of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.log
+
+ Notes
+ -----
+ For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+ (note, however, that otherwise `numpy.log` and this `log` are identical,
+ i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+ notably, the complex principal value if ``x.imag != 0``).
Examples
--------
- >>> import math
- >>> np.lib.scimath.log(math.exp(1))
+ >>> np.emath.log(np.exp(1))
1.0
- Negative arguments are correctly handled (recall that for negative
- arguments, the identity exp(log(z))==z does not hold anymore):
+ Negative arguments are handled "correctly" (recall that `exp(log(x)) == x`
+ does *not* hold for real `x < 0`):
- >>> np.lib.scimath.log(-math.exp(1)) == (1+1j*math.pi)
+ >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
True
"""
@@ -249,18 +264,34 @@ def log(x):
def log10(x):
"""
- Return the base 10 logarithm of x.
+ Compute the logarithm base 10 of `x`.
- If x contains negative inputs, the answer is computed and returned in the
- complex domain.
+ Return the "principal value" (for a description of this, see
+ `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+ is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+ returns ``inf``). Otherwise, the complex principal value is returned.
Parameters
----------
- x : array_like
+ x : array_like or scalar
+ The value(s) whose log base 10 is (are) required.
Returns
-------
- out : array_like
+ out : ndarray or scalar
+ The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.log10
+
+ Notes
+ -----
+ For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+ (note, however, that otherwise `numpy.log10` and this `log10` are
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+ and, notably, the complex principal value if ``x.imag != 0``).
Examples
--------
@@ -269,11 +300,10 @@ def log10(x):
>>> np.set_printoptions(precision=4)
- >>> np.lib.scimath.log10([10**1,10**2])
- array([ 1., 2.])
-
+ >>> np.emath.log10(10**1)
+ 1.0
- >>> np.lib.scimath.log10([-10**1,-10**2,10**2])
+ >>> np.emath.log10([-10**1, -10**2, 10**2])
array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ])
"""
@@ -315,18 +345,34 @@ def logn(n, x):
def log2(x):
"""
- Take log base 2 of x.
+ Compute the logarithm base 2 of `x`.
- If x contains negative inputs, the answer is computed and returned in the
- complex domain.
+ Return the "principal value" (for a description of this, see
+ `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+ a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+ ``inf``). Otherwise, the complex principal value is returned.
Parameters
----------
- x : array_like
+ x : array_like or scalar
+ The value(s) whose log base 2 is (are) required.
Returns
-------
- out : array_like
+ out : ndarray or scalar
+ The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.log2
+
+ Notes
+ -----
+ For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+ (note, however, that otherwise `numpy.log2` and this `log2` are
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+ and, notably, the complex principal value if ``x.imag != 0``).
Examples
--------
@@ -335,10 +381,10 @@ def log2(x):
>>> np.set_printoptions(precision=4)
- >>> np.lib.scimath.log2([4,8])
- array([ 2., 3.])
+ >>> np.emath.log2(8)
+ 3.0
- >>> np.lib.scimath.log2([-4,-8,8])
+ >>> np.emath.log2([-4, -8, 8])
array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
"""
@@ -383,29 +429,44 @@ def power(x, p):
return nx.power(x, p)
def arccos(x):
- """Compute the inverse cosine of x.
-
- For real x with abs(x)<=1, this returns the principal value.
+ """
+ Compute the inverse cosine of x.
- If abs(x)>1, the complex arccos() is computed.
+ Return the "principal value" (for a description of this, see
+ `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+ `abs(x) <= 1`, this is a real number in the closed interval
+ :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
Parameters
----------
- x : array_like
+ x : array_like or scalar
+ The value(s) whose arccos is (are) required.
Returns
-------
- array_like
+ out : ndarray or scalar
+ The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+ is `out`, otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.arccos
+
+ Notes
+ -----
+ For an arccos() that returns ``NAN`` when real `x` is not in the
+ interval ``[-1,1]``, use `numpy.arccos`.
Examples
--------
>>> np.set_printoptions(precision=4)
- >>> np.lib.scimath.arccos(1)
+ >>> np.emath.arccos(1) # a scalar is returned
0.0
- >>> np.lib.scimath.arccos([1,2])
+ >>> np.emath.arccos([1,2])
array([ 0.-0.j , 0.+1.317j])
+
"""
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
@@ -414,28 +475,40 @@ def arcsin(x):
"""
Compute the inverse sine of x.
- For real x with abs(x)<=1, this returns the principal value.
-
- If abs(x)>1, the complex arcsin() is computed.
+ Return the "principal value" (for a description of this, see
+ `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+ `abs(x) <= 1`, this is a real number in the closed interval
+ :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
+ returned.
Parameters
----------
- x : array_like
+ x : array_like or scalar
+ The value(s) whose arcsin is (are) required.
Returns
-------
- array_like
+ out : ndarray or scalar
+ The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+ is `out`, otherwise an array object is returned.
- Examples
+ See Also
--------
- (We set the printing precision so the example can be auto-tested)
+ numpy.arcsin
+
+ Notes
+ -----
+ For an arcsin() that returns ``NAN`` when real `x` is not in the
+ interval ``[-1,1]``, use `numpy.arcsin`.
+ Examples
+ --------
>>> np.set_printoptions(precision=4)
- >>> np.lib.scimath.arcsin(0)
+ >>> np.emath.arcsin(0)
0.0
- >>> np.lib.scimath.arcsin([0,1])
+ >>> np.emath.arcsin([0,1])
array([ 0. , 1.5708])
"""
@@ -444,30 +517,46 @@ def arcsin(x):
def arctanh(x):
"""
- Compute the inverse hyperbolic tangent of x.
+ Compute the inverse hyperbolic tangent of `x`.
- For real x with abs(x)<=1, this returns the principal value.
-
- If abs(x)>1, the complex arctanh() is computed.
+ Return the "principal value" (for a description of this, see
+ `numpy.arctanh`) of `arctanh(x)`. For real `x` such that
+ `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is
+ complex, the result is complex. Finally, `x = 1` returns ``inf`` and
+ `x = -1` returns ``-inf``.
Parameters
----------
- x : array_like
+ x : array_like or scalar
+ The value(s) whose arctanh is (are) required.
Returns
-------
- out : array_like
+ out : ndarray or scalar
+ The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+ a scalar so is `out`, otherwise an array object is returned.
+
+
+ See Also
+ --------
+ numpy.arctanh
+
+ Notes
+ -----
+ For an arctanh() that returns ``NAN`` when real `x` is not in the
+ interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
+ return +/-inf for `x = +/-1`).
Examples
--------
- (We set the printing precision so the example can be auto-tested)
>>> np.set_printoptions(precision=4)
- >>> np.lib.scimath.arctanh(0)
- 0.0
+ >>> np.emath.arctanh(np.matrix(np.eye(2))) # Note: an array is returned
+ array([[ Inf, 0.],
+ [ 0., Inf]])
- >>> np.lib.scimath.arctanh([0,2])
- array([ 0.0000+0.j , 0.5493-1.5708j])
+ >>> np.emath.arctanh([1j])
+ array([ 0.+0.7854j])
"""
x = _fix_real_abs_gt_1(x)
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index c7a953491..ebd6d5a22 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -1,4 +1,9 @@
-""" Utilities that manipulate strides to achieve desirable effects.
+"""
+Utilities that manipulate strides to achieve desirable effects.
+
+An explanation of strides can be found in the "ndarray.rst" file in the
+NumPy reference guide.
+
"""
import numpy as np
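A minimal sketch of what this module is for (assuming ``as_strided`` is exposed here, as in released NumPy): building a view with custom shape and strides, in this case a 1-D sliding window, without copying data:

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    x = np.arange(6)
    step = x.strides[0]
    windows = as_strided(x, shape=(4, 3), strides=(step, step))
    # windows[i] is a view of x[i:i+3]; writing into `windows` writes
    # into `x` as well, so treat the result as read-only.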
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 46294fbd9..0d53bbb68 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -221,7 +221,9 @@ def diag(v, k=0):
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
- Diagonal in question. The default is 0.
+ Diagonal in question. The default is 0. Use `k>0` for diagonals
+ above the main diagonal, and `k<0` for diagonals below the main
+ diagonal.
Returns
-------
@@ -233,6 +235,8 @@ def diag(v, k=0):
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
+ triu : Upper triangle of an array.
+ tril : Lower triangle of an array.
Examples
--------
@@ -244,6 +248,10 @@ def diag(v, k=0):
>>> np.diag(x)
array([0, 4, 8])
+ >>> np.diag(x, k=1)
+ array([1, 5])
+ >>> np.diag(x, k=-1)
+ array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
@@ -441,14 +449,17 @@ def vander(x, N=None):
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the i-th output column is the input vector to
- the power of ``N - i - 1``.
+ the power of ``N - i - 1``. Such a matrix with a geometric progression
+ in each row is called a Van Der Monde, or Vandermonde, matrix, after
+ Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
- Input array.
+ 1-D input array.
N : int, optional
- Order of (number of columns in) the output.
+ Order of (number of columns in) the output. If `N` is not specified,
+ a square array is returned (``N = len(x)``).
Returns
-------
@@ -456,6 +467,11 @@ def vander(x, N=None):
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
+ References
+ ----------
+ .. [1] Wikipedia, "Vandermonde matrix",
+ http://en.wikipedia.org/wiki/Vandermonde_matrix
+
Examples
--------
>>> x = np.array([1, 2, 3, 5])
@@ -472,6 +488,21 @@ def vander(x, N=None):
[ 9, 3, 1],
[25, 5, 1]])
+ >>> x = np.array([1, 2, 3, 5])
+ >>> np.vander(x)
+ array([[ 1, 1, 1, 1],
+ [ 8, 4, 2, 1],
+ [ 27, 9, 3, 1],
+ [125, 25, 5, 1]])
+
+ The determinant of a square Vandermonde matrix is the product
+ of the differences between the values of the input vector:
+
+ >>> np.linalg.det(np.vander(x))
+ 48.000000000000043
+ >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
+ 48
+
"""
x = asarray(x)
if N is None: N=len(x)
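An equivalent construction via broadcasting, as an editorial sketch of what the
columns contain (not necessarily how `vander` is implemented)::

    import numpy as np

    x = np.array([1, 2, 3, 5])
    N = len(x)
    # column i holds x**(N - i - 1), i.e. decreasing powers from left to right
    manual = x[:, None] ** np.arange(N - 1, -1, -1)
    np.all(manual == np.vander(x))   # True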
@@ -483,46 +514,46 @@ def vander(x, N=None):
def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
"""
- Compute the bidimensional histogram of two data samples.
+ Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
- A sequence of values to be histogrammed along the first dimension.
+ A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
- A sequence of values to be histogrammed along the second dimension.
- bins : int or [int, int] or array-like or [array, array], optional
- The bin specification:
+ A sequence of values to be histogrammed along the second dimension.
+ bins : int or [int, int] or array_like or [array, array], optional
+ The bin specification:
- * the number of bins for the two dimensions (nx=ny=bins),
- * the number of bins in each dimension (nx, ny = bins),
- * the bin edges for the two dimensions (x_edges=y_edges=bins),
- * the bin edges in each dimension (x_edges, y_edges = bins).
+ * If int, the number of bins for the two dimensions (nx=ny=bins).
+ * If [int, int], the number of bins in each dimension (nx, ny = bins).
+ * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
+ * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
- The leftmost and rightmost edges of the bins along each dimension
- (if not specified explicitly in the `bins` parameters):
- [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
- considered outliers and not tallied in the histogram.
- normed : boolean, optional
- If False, returns the number of samples in each bin. If True, returns
- the bin density, ie, the bin count divided by the bin area.
- weights : array-like, shape(N,), optional
- An array of values `w_i` weighing each sample `(x_i, y_i)`. Weights are
- normalized to 1 if normed is True. If normed is False, the values of the
- returned histogram are equal to the sum of the weights belonging to the
- samples falling into each bin.
+ The leftmost and rightmost edges of the bins along each dimension
+ (if not specified explicitly in the `bins` parameters):
+ ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
+ will be considered outliers and not tallied in the histogram.
+ normed : bool, optional
+ If False, returns the number of samples in each bin. If True, returns
+ the bin density, i.e. the bin count divided by the bin area.
+ weights : array_like, shape(N,), optional
+ An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
+ are normalized to 1 if `normed` is True. If `normed` is False, the
+ values of the returned histogram are equal to the sum of the weights
+ belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
- The bidimensional histogram of samples x and y. Values in x are
- histogrammed along the first dimension and values in y are histogrammed
- along the second dimension.
+ The bi-dimensional histogram of samples `x` and `y`. Values in `x`
+ are histogrammed along the first dimension and values in `y` are
+ histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
- The bin edges along the first dimension.
+ The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
- The bin edges along the second dimension.
+ The bin edges along the second dimension.
See Also
--------
@@ -531,28 +562,36 @@ def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
Notes
-----
- When normed is True, then the returned histogram is the sample density,
+ When `normed` is True, then the returned histogram is the sample density,
defined such that:
- .. math::
- \\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
+ .. math::
+ \\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
- where :math:`H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
- the area of bin :math:`{i,j}`.
+ where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
+ the area of bin `{i,j}`.
- Please note that the histogram does not follow the cartesian convention
+ Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
- This ensures compatibility with `histogrammdd`.
+ This ensures compatibility with `histogramdd`.
Examples
--------
- >>> x,y = np.random.randn(2,100)
- >>> H, xedges, yedges = np.histogram2d(x, y, bins = (5, 8))
+ >>> x, y = np.random.randn(2, 100)
+ >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5,8), (6,), (9,))
+ We can now use Matplotlib to visualize this 2-dimensional histogram:
+
+ >>> extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
+ >>> import matplotlib.pyplot as plt
+ >>> plt.imshow(H, extent=extent)
+ <matplotlib.image.AxesImage object at ...>
+ >>> plt.show()
+
"""
from numpy import histogramdd
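An editorial sketch of the `normed` behaviour described in the Notes (values
depend on the random data; the sum is only approximately 1, up to floating-point
error)::

    import numpy as np

    x, y = np.random.randn(2, 100)
    H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8), normed=True)
    dx = np.diff(xedges)                    # bin widths along the first dimension
    dy = np.diff(yedges)                    # bin widths along the second dimension
    (H * dx[:, None] * dy[None, :]).sum()   # ~1.0: the density integrates to one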
@@ -569,33 +608,37 @@ def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
def mask_indices(n,mask_func,k=0):
- """Return the indices to access (n,n) arrays, given a masking function.
+ """
+ Return the indices to access (n, n) arrays, given a masking function.
- Assume mask_func() is a function that, for a square array a of size (n,n)
- with a possible offset argument k, when called as mask_func(a,k) returns a
- new array with zeros in certain locations (functions like triu() or tril()
- do precisely this). Then this function returns the indices where the
- non-zero values would be located.
+ Assume `mask_func` is a function that, for a square array a of size
+ ``(n, n)`` with a possible offset argument `k`, when called as
+ ``mask_func(a, k)`` returns a new array with zeros in certain locations
+ (functions like `triu` or `tril` do precisely this). Then this function
+ returns the indices where the non-zero values would be located.
Parameters
----------
n : int
- The returned indices will be valid to access arrays of shape (n,n).
-
+ The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
- A function whose api is similar to that of numpy.tri{u,l}. That is,
- mask_func(x,k) returns a boolean array, shaped like x. k is an optional
- argument to the function.
-
+ A function whose call signature is similar to that of `triu`, `tril`.
+ That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
+ `k` is an optional argument to the function.
k : scalar
- An optional argument which is passed through to mask_func(). Functions
- like tri{u,l} take a second argument that is interpreted as an offset.
+ An optional argument which is passed through to `mask_func`. Functions
+ like `triu`, `tril` take a second argument that is interpreted as an
+ offset.
Returns
-------
- indices : an n-tuple of index arrays.
- The indices corresponding to the locations where mask_func(ones((n,n)),k)
- is True.
+ indices : tuple of arrays.
+ The `n` arrays of indices corresponding to the locations where
+ ``mask_func(np.ones((n, n)), k)`` is True.
+
+ See Also
+ --------
+ triu, tril, triu_indices, tril_indices
Notes
-----
@@ -605,27 +648,30 @@ def mask_indices(n,mask_func,k=0):
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
- >>> iu = mask_indices(3,np.triu)
+
+ >>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
- >>> a = np.arange(9).reshape(3,3)
+
+ >>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
-
- Then:
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
- >>> iu1 = mask_indices(3,np.triu,1)
+
+ >>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
+
>>> a[iu1]
array([1, 2, 5])
- """
+
+ """
m = ones((n,n),int)
a = mask_func(m,k)
return where(a != 0)
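An editorial sketch of the relationship with the dedicated helpers listed in
See Also::

    import numpy as np

    iu = np.mask_indices(4, np.triu)
    # the same pair of index arrays as the dedicated helper
    all(np.all(a == b) for a, b in zip(iu, np.triu_indices(4)))   # True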
@@ -704,17 +750,21 @@ def tril_indices(n,k=0):
def tril_indices_from(arr,k=0):
- """Return the indices for the lower-triangle of an (n,n) array.
+ """
+ Return the indices for the lower-triangle of an (n, n) array.
+
+ See `tril_indices` for full details.
- See tril_indices() for full details.
-
Parameters
----------
arr : ndarray
    The indices will be valid for square arrays whose dimensions are
    the same as those of `arr`.
-
k : int, optional
- Diagonal offset (see tril() for details).
+ Diagonal offset (see `tril` for details).
+
+ See Also
+ --------
+ tril_indices, tril
Notes
-----
@@ -800,17 +850,21 @@ def triu_indices(n,k=0):
def triu_indices_from(arr,k=0):
- """Return the indices for the lower-triangle of an (n,n) array.
+ """
+ Return the indices for the upper-triangle of an (n, n) array.
+
+ See `triu_indices` for full details.
- See triu_indices() for full details.
-
Parameters
----------
arr : ndarray
    The indices will be valid for square arrays whose dimensions are
    the same as those of `arr`.
-
k : int, optional
- Diagonal offset (see triu() for details).
+ Diagonal offset (see `triu` for details).
+
+ See Also
+ --------
+ triu_indices, triu
Notes
-----
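A combined usage sketch for the two ``*_indices_from`` helpers documented above
(editorial aside, not part of the patch)::

    import numpy as np

    a = np.arange(16).reshape((4, 4))
    a[np.triu_indices_from(a)]         # the upper-triangular elements of a
    a[np.tril_indices_from(a, k=-1)]   # the elements strictly below the main diagonal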
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index babad2d62..4fc11bead 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -13,19 +13,47 @@ from ufunclike import isneginf, isposinf
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
def mintypecode(typechars,typeset='GDFgdf',default='d'):
- """ Return a minimum data type character from typeset that
- handles all typechars given
+ """
+ Return the character for the minimum-size type to which given types can
+ be safely cast.
+
+ The returned type character must represent the smallest size dtype such
+ that an array of the returned type can handle the data from an array of
+ all types in `typechars` (or if `typechars` is an array, then its
+ dtype.char).
- The returned type character must be the smallest size such that
- an array of the returned type can handle the data from an array of
- type t for each t in typechars (or if typechars is an array,
- then its dtype.char).
+ Parameters
+ ----------
+ typechars : list of str or array_like
+ If a list of strings, each string should represent a dtype.
+ If array_like, the character representation of the array dtype is used.
+ typeset : str or list of str, optional
+ The set of characters that the returned character is chosen from.
+ The default set is 'GDFgdf'.
+ default : str, optional
+ The default character; this is returned if none of the characters in
+ `typechars` matches a character in `typeset`.
- If the typechars does not intersect with the typeset, then default
- is returned.
+ Returns
+ -------
+ typechar : str
+ The character representing the minimum-size type that was found.
+
+ See Also
+ --------
+ dtype, sctype2char, maximum_sctype
+
+ Examples
+ --------
+ >>> np.mintypecode(['d', 'f', 'S'])
+ 'd'
+ >>> x = np.array([1.1, 2-3.j])
+ >>> np.mintypecode(x)
+ 'D'
+
+ >>> np.mintypecode('abceh', default='G')
+ 'G'
- If t in typechars is not a string then t=asarray(t).dtype.char is
- applied.
"""
typecodes = [(type(t) is type('') and t) or asarray(t).dtype.char\
for t in typechars]
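An editorial sketch relating the returned characters to dtypes::

    import numpy as np

    np.mintypecode(['d', 'f'])    # 'd': float64 can hold both float64 and float32 data
    np.dtype('d'), np.dtype('f')  # the dtypes those characters stand for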
@@ -152,14 +180,14 @@ def iscomplex(x):
Returns
-------
- out : ndarray, bool
+ out : ndarray of bools
Output array.
See Also
--------
- isreal: Returns a bool array, where True if input element is real.
- iscomplexobj: Return True if x is a complex type or an array of complex
- numbers.
+ isreal
+ iscomplexobj : Return True if x is a complex type or an array of complex
+ numbers.
Examples
--------
@@ -177,8 +205,8 @@ def isreal(x):
"""
Returns a bool array, where True if input element is real.
- If the input value has a complex type but with complex part zero, the
- return value is True.
+ If element has complex type with zero complex part, the return value
+ for that element is True.
Parameters
----------
@@ -192,14 +220,13 @@ def isreal(x):
See Also
--------
- iscomplex: Return a bool array, where True if input element is complex
- (non-zero imaginary part).
- isrealobj: Return True if x is not a complex type.
+ iscomplex
+ isrealobj : Return True if x is not a complex type.
Examples
--------
>>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j])
- >>> array([False, True, True, True, True, False], dtype=bool)
+ array([False, True, True, True, True, False], dtype=bool)
"""
return imag(x) == 0
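An editorial sketch showing that `iscomplex` and `isreal` are element-wise
complements::

    import numpy as np

    z = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j])
    np.iscomplex(z)                             # True where the imaginary part is non-zero
    np.isreal(z)                                # True where the imaginary part is zero
    np.all(np.iscomplex(z) == ~np.isreal(z))    # True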
@@ -520,14 +547,19 @@ array_precision = {_nx.single : 0,
_nx.clongdouble : 2}
def common_type(*arrays):
"""
- Return the inexact scalar type which is most common in a list of arrays.
+ Return a scalar type which is common to the input arrays.
+
+ The return type will always be an inexact (i.e. floating point) scalar
+ type, even if all the arrays are integer arrays. If one of the inputs is
+ an integer array, the minimum precision type that is returned is a
+ 64-bit floating point dtype.
- The return type will always be an inexact scalar type, even if all the
- arrays are integer arrays
+ All input arrays can be safely cast to the returned dtype without loss
+ of information.
Parameters
----------
- array1, array2, ... : ndarray
+ array1, array2, ... : ndarrays
Input arrays.
Returns
@@ -537,12 +569,16 @@ def common_type(*arrays):
See Also
--------
- dtype
+ dtype, mintypecode
Examples
--------
- >>> np.common_type(np.arange(4), np.array([45,6]), np.array([45.0, 6.0]))
+ >>> np.common_type(np.arange(2, dtype=np.float32))
+ <type 'numpy.float32'>
+ >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
<type 'numpy.float64'>
+ >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+ <type 'numpy.complex128'>
"""
is_complex = False
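An editorial sketch of the promotion rules described above::

    import numpy as np

    np.common_type(np.arange(4))                      # integer input promotes to float64
    np.common_type(np.arange(4, dtype=np.float32))    # a lone float32 array stays float32
    np.common_type(np.arange(4), np.array([1.0+0j]))  # any complex input gives complex128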
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index a4072d7bd..4eefdb15e 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -14,9 +14,9 @@ __all__ = ['issubclass_', 'get_numpy_include', 'issubsctype',
def get_include():
"""
- Return the directory that contains the numpy \\*.h header files.
+ Return the directory that contains the NumPy \\*.h header files.
- Extension modules that need to compile against numpy should use this
+ Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
@@ -48,9 +48,24 @@ def get_numarray_include(type=None):
Extension modules that need to compile against numarray should use this
function to locate the appropriate include directory.
+ Parameters
+ ----------
+ type : any, optional
+ If `type` is not None, the location of the NumPy headers is returned
+ as well.
+
+ Returns
+ -------
+ dirs : str or list of str
+ If `type` is None, `dirs` is a string containing the path to the
+ numarray headers.
+ If `type` is not None, `dirs` is a list of strings with first the
+ path(s) to the numarray headers, followed by the path to the NumPy
+ headers.
+
Notes
-----
- When using ``distutils``, for example in ``setup.py``.
+ Useful when using ``distutils``, for example in ``setup.py``.
::
import numpy as np
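For `get_include`, a minimal setup.py sketch along the lines implied above
(editorial aside; the extension name and source file are placeholders)::

    from distutils.core import setup, Extension
    import numpy as np

    # compile a hypothetical C extension against the NumPy headers
    setup(ext_modules=[
        Extension('my_ext', ['my_ext.c'],
                  include_dirs=[np.get_include()]),
    ])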
@@ -84,24 +99,30 @@ def deprecate(func, oldname=None, newname=None):
"""
Deprecate old functions.
- Issues a DeprecationWarning, adds warning to oldname's docstring,
- rebinds oldname.__name__ and returns new function object.
+ Issues a DeprecationWarning, adds warning to `oldname`'s docstring,
+ rebinds ``oldname.__name__`` and returns the new function object.
Parameters
----------
func : function
-
- oldname : string
-
- newname : string
+ The function to be deprecated.
+ oldname : str, optional
+ The name of the function to be deprecated. Default is None, in which
+ case the name of `func` is used.
+ newname : str, optional
+ The new name for the function. Default is None, in which case
+ the deprecation message is that `oldname` is deprecated. If given,
+ the deprecation message is that `oldname` is deprecated and `newname`
+ should be used instead.
Returns
-------
old_func : function
+ The deprecated function.
Examples
--------
- Note that olduint returns a value after printing Deprecation Warning.
+ Note that ``olduint`` returns a value after printing a DeprecationWarning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
@@ -126,7 +147,7 @@ def deprecate(func, oldname=None, newname=None):
depdoc = '%s is DEPRECATED!! -- use %s instead' % (oldname, newname,)
def newfunc(*args,**kwds):
- """Use get_include, get_numpy_include is DEPRECATED."""
+ """arrayrange is DEPRECATED!! -- use `arange` instead."""
warnings.warn(str1, DeprecationWarning)
return func(*args, **kwds)
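An editorial sketch of the wrapper in use, following the docstring example
above::

    import numpy as np

    olduint = np.deprecate(np.uint)
    olduint(6)    # emits a DeprecationWarning, then returns np.uint(6)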
@@ -184,7 +205,20 @@ def byte_bounds(a):
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
- would not use every byte between the (`low`, `high`) values.
+ will not use every byte between the (`low`, `high`) values.
+
+ Examples
+ --------
+ >>> I = np.eye(2, dtype='f'); I.dtype
+ dtype('float32')
+ >>> low, high = np.byte_bounds(I)
+ >>> high - low == I.size*I.itemsize
+ True
+ >>> I = np.eye(2, dtype='G'); I.dtype
+ dtype('complex192')
+ >>> low, high = np.byte_bounds(I)
+ >>> high - low == I.size*I.itemsize
+ True
"""
ai = a.__array_interface__
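An editorial sketch of the non-contiguous case mentioned above::

    import numpy as np

    a = np.arange(10, dtype=np.float64)
    b = a[::2]                            # a non-contiguous view of a
    low, high = np.byte_bounds(b)
    high - low == b.size * b.itemsize     # False: the byte span includes the skipped elements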
@@ -267,7 +301,17 @@ def who(vardict=None):
Examples
--------
- >>> d = {'x': arange(2.0), 'y': arange(3.0), 'txt': 'Some str', 'idx': 5}
+ >>> a = np.arange(10)
+ >>> b = np.ones(20)
+ >>> np.who()
+ Name Shape Bytes Type
+ ===========================================================
+ a 10 40 int32
+ b 20 160 float64
+ Upper bound on total bytes = 200
+
+ >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
+ ... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
@@ -844,6 +888,19 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
+ """
+ Object to evaluate constant string expressions.
+
+ This includes strings with lists, dicts and tuples using the abstract
+ syntax tree created by ``compiler.parse``.
+
+ For an example of usage, see `safe_eval`.
+
+ See Also
+ --------
+ safe_eval
+
+ """
def visit(self, node, **kw):
cls = node.__class__
@@ -895,10 +952,12 @@ def safe_eval(source):
Parameters
----------
source : str
+ The string to evaluate.
Returns
-------
obj : object
+ The result of evaluating `source`.
Raises
------
@@ -908,25 +967,22 @@ def safe_eval(source):
Examples
--------
- >>> from numpy.lib.utils import safe_eval
- >>> safe_eval('1')
+ >>> np.safe_eval('1')
1
- >>> safe_eval('[1, 2, 3]')
+ >>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
- >>> safe_eval('{"foo": ("bar", 10.0)}')
+ >>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
- >>> safe_eval('import os')
+
+ >>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
+
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
- >>> safe_eval('dict')
- Traceback (most recent call last):
- ...
- SyntaxError: Unknown name: dict
"""
# Local import to speed up numpy's import time.