Diffstat (limited to 'numpy/lib/npyio.py')
-rw-r--r--  numpy/lib/npyio.py  |  80
1 file changed, 52 insertions(+), 28 deletions(-)
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index af259cfef..42a539f78 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -285,21 +285,22 @@ def load(file, mmap_mode=None):
Parameters
----------
file : file-like object or string
- The file to read. It must support ``seek()`` and ``read()`` methods.
- If the filename extension is ``.gz``, the file is first decompressed.
+ The file to read. File-like objects must support the
+ ``seek()`` and ``read()`` methods. Pickled files require that the
+ file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
- If not None, then memory-map the file, using the given mode
- (see `numpy.memmap` for a detailed description of the modes).
- A memory-mapped array is kept on disk. However, it can be accessed
- and sliced like any ndarray. Memory mapping is especially useful for
- accessing small fragments of large files without reading the entire
- file into memory.
+ If not None, then memory-map the file, using the given mode (see
+ `numpy.memmap` for a detailed description of the modes). A
+ memory-mapped array is kept on disk. However, it can be accessed
+ and sliced like any ndarray. Memory mapping is especially useful
+ for accessing small fragments of large files without reading the
+ entire file into memory.
Returns
-------
result : array, tuple, dict, etc.
- Data stored in the file. For ``.npz`` files, the returned instance of
- NpzFile class must be closed to avoid leaking file descriptors.
+ Data stored in the file. For ``.npz`` files, the returned instance
+ of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
@@ -308,7 +309,7 @@ def load(file, mmap_mode=None):
See Also
--------
- save, savez, loadtxt
+ save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
@@ -319,13 +320,14 @@ def load(file, mmap_mode=None):
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- - If the file is a ``.npz`` file, the returned value supports the context
- manager protocol in a similar fashion to the open function::
+ - If the file is a ``.npz`` file, the returned value supports the
+ context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
- The underlying file descriptor is closed when exiting the 'with' block.
+ The underlying file descriptor is closed when exiting the 'with'
+ block.
Examples
--------
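A minimal sketch of the usage the amended `load` docstring describes; the file and array names ('foo.npz', 'bar.npy', 'a') are illustrative:

    import numpy as np

    # Write a small archive and a plain .npy file to load back.
    np.savez('foo.npz', a=np.arange(3))
    np.save('bar.npy', np.arange(10))

    # The NpzFile returned for .npz archives supports the context
    # manager protocol; the file descriptor is closed on exit.
    with np.load('foo.npz') as data:
        a = data['a']

    # mmap_mode keeps the array on disk and maps it lazily, which is
    # useful for reading small slices of large files.
    big = np.load('bar.npy', mmap_mode='r')
    first_two = big[:2]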
@@ -549,6 +551,7 @@ def savez_compressed(file, *args, **kwds):
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+ numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
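A small round-trip sketch of the cross-reference added above; 'data.npz' and the array names are illustrative:

    import numpy as np

    # savez_compressed writes a compressed .npz archive; np.load
    # reads it back just like an uncompressed one.
    np.savez_compressed('data.npz', x=np.linspace(0, 1, 5), y=np.eye(2))
    with np.load('data.npz') as npz:
        x, y = npz['x'], npz['y']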
@@ -668,6 +671,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
+
.. versionadded:: 1.6.0
Returns
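A brief sketch of the `ndmin` behaviour documented above (Python 3, with an in-memory file for illustration):

    import numpy as np
    from io import StringIO

    # A single data row would normally be squeezed to 1-D;
    # ndmin=2 keeps the result two-dimensional.
    arr = np.loadtxt(StringIO("1 2 3\n"), ndmin=2)
    # arr.shape -> (1, 3)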
@@ -840,6 +844,11 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
continue
if usecols:
vals = [vals[i] for i in usecols]
+ if len(vals) != N:
+ line_num = i + skiprows + 1
+ raise ValueError("Wrong number of columns at line %d"
+ % line_num)
+
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
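A sketch of how the new column check behaves on ragged input; the exact wording of the error message may differ between numpy versions:

    import numpy as np
    from io import StringIO

    # The second row has two columns instead of three, so the added
    # check raises ValueError with the offending line number.
    try:
        np.loadtxt(StringIO("1 2 3\n4 5\n"))
    except ValueError as exc:
        print(exc)   # e.g. "Wrong number of columns at line 2"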
@@ -906,22 +915,26 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
- Character separating columns.
+ String or character separating columns.
newline : str, optional
+ String or character separating lines.
+
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
+
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
+
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
+
.. versionadded:: 1.7.0
- Character separating lines.
See Also
--------
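A minimal sketch of the `header`/`footer`/`comments` parameters documented above; 'out.txt' is an illustrative file name:

    import numpy as np

    # comments ('# ' by default) is prepended to header and footer,
    # so loadtxt can skip them when reading the file back.
    data = np.arange(6).reshape(3, 2)
    np.savetxt('out.txt', data, fmt='%d', delimiter=',',
               header='col_a,col_b', footer='end of data', comments='# ')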
@@ -1191,14 +1204,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
+ skip_rows : int, optional
+ `skip_rows` was deprecated in numpy 1.5, and will be removed in
+ numpy 2.0. Please use `skip_header` instead.
skip_header : int, optional
- The numbers of lines to skip at the beginning of the file.
+ The number of lines to skip at the beginning of the file.
skip_footer : int, optional
- The numbers of lines to skip at the end of the file
+ The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+ missing : variable, optional
+ `missing` was deprecated in numpy 1.5, and will be removed in
+ numpy 2.0. Please use `missing_values` instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
@@ -1236,6 +1255,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
+ loose : bool, optional
+ If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
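A short sketch of the non-deprecated spellings (`skip_header`, `missing_values`) on a small in-memory table; the data is illustrative:

    import numpy as np
    from io import StringIO

    text = StringIO("exported 2012-01-01\n1,2,N/A\n4,,6\n")
    # skip_header replaces the deprecated skip_rows; missing_values
    # replaces the deprecated missing keyword.
    arr = np.genfromtxt(text, delimiter=',', skip_header=1,
                        missing_values='N/A', filling_values=0.0)
    # arr -> [[1., 2., 0.], [4., 0., 6.]]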
@@ -1840,7 +1861,7 @@ def recfromtxt(fname, **kwargs):
array will be determined from the data.
"""
- kwargs.update(dtype=kwargs.get('dtype', None))
+ kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
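A brief sketch of the dtype=None default that `recfromtxt` now sets via ``setdefault``; the field names are illustrative:

    import numpy as np
    from io import StringIO

    # With dtype=None the field types are inferred from the data,
    # and the result is returned as a record array.
    rec = np.recfromtxt(StringIO("1 2.5\n3 4.5\n"), names=['idx', 'val'])
    # rec['idx'] -> [1, 3]; rec['val'] -> [2.5, 4.5]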
@@ -1867,17 +1888,20 @@ def recfromcsv(fname, **kwargs):
--------
numpy.genfromtxt : generic function to load ASCII data.
+ Notes
+ -----
+ By default, `dtype` is None, which means that the data-type of the output
+ array will be determined from the data.
+
"""
- case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
- names = kwargs.get('names', True)
- if names is None:
- names = True
- kwargs.update(dtype=kwargs.get('update', None),
- delimiter=kwargs.get('delimiter', ",") or ",",
- names=names,
- case_sensitive=case_sensitive)
- usemask = kwargs.get("usemask", False)
+ # Set default kwargs for genfromtxt as relevant to csv import.
+ kwargs.setdefault("case_sensitive", "lower")
+ kwargs.setdefault("names", True)
+ kwargs.setdefault("delimiter", ",")
+ kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
+
+ usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
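A minimal sketch of `recfromcsv` with the new ``setdefault``-based defaults; the data is illustrative. Note that the removed code looked up the wrong key (``'update'`` instead of ``'dtype'``), so a caller-supplied dtype was silently discarded; with ``setdefault``, explicit keywords now take precedence over the csv-friendly defaults:

    import numpy as np
    from io import StringIO

    # Lower-cased field names, comma delimiter, and inferred dtypes
    # are the defaults; any explicit keyword (including dtype) wins.
    csv = StringIO("Name,Value\nfoo,1\nbar,2\n")
    rec = np.recfromcsv(csv)
    # rec.dtype.names -> ('name', 'value'); rec['value'] -> [1, 2]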