Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/__init__.py                  |   2
-rw-r--r--  numpy/lib/_datasource.py               |   8
-rw-r--r--  numpy/lib/arraysetops.py               |  43
-rw-r--r--  numpy/lib/function_base.py             |  50
-rw-r--r--  numpy/lib/histograms.py                |  32
-rw-r--r--  numpy/lib/nanfunctions.py              |  16
-rw-r--r--  numpy/lib/npyio.py                     |  46
-rw-r--r--  numpy/lib/polynomial.py                |   6
-rw-r--r--  numpy/lib/shape_base.py                |  10
-rw-r--r--  numpy/lib/stride_tricks.py             |   2
-rw-r--r--  numpy/lib/tests/test__datasource.py    |  31
-rw-r--r--  numpy/lib/tests/test_arraypad.py       |  58
-rw-r--r--  numpy/lib/tests/test_arraysetops.py    |  92
-rw-r--r--  numpy/lib/tests/test_format.py         |  23
-rw-r--r--  numpy/lib/tests/test_function_base.py  |  10
-rw-r--r--  numpy/lib/tests/test_histograms.py     |  14
-rw-r--r--  numpy/lib/tests/test_index_tricks.py   |   1
-rw-r--r--  numpy/lib/tests/test_io.py             |  26
-rw-r--r--  numpy/lib/tests/test_polynomial.py     | 150
-rw-r--r--  numpy/lib/tests/test_recfunctions.py   |   8
-rw-r--r--  numpy/lib/tests/test_shape_base.py     |  23
-rw-r--r--  numpy/lib/tests/test_stride_tricks.py  |  14
-rw-r--r--  numpy/lib/tests/test_ufunclike.py      |  15
-rw-r--r--  numpy/lib/type_check.py                |   2
-rw-r--r--  numpy/lib/ufunclike.py                 |  28
25 files changed, 505 insertions(+), 205 deletions(-)
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index dc40ac67b..c1757150e 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -26,7 +26,7 @@ from .financial import *
from .arrayterator import Arrayterator
from .arraypad import *
from ._version import *
-from numpy.core.multiarray import tracemalloc_domain
+from numpy.core._multiarray_umath import tracemalloc_domain
__all__ = ['emath', 'math', 'tracemalloc_domain']
__all__ += type_check.__all__
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 6f1295f09..ab00b1444 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -37,6 +37,7 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import warnings
import shutil
import io
@@ -85,9 +86,10 @@ def _python2_bz2open(fn, mode, encoding, newline):
if "t" in mode:
# BZ2File is missing necessary functions for TextIOWrapper
- raise ValueError("bz2 text files not supported in python2")
- else:
- return bz2.BZ2File(fn, mode)
+ warnings.warn("Assuming latin1 encoding for bz2 text file in Python2",
+ RuntimeWarning, stacklevel=5)
+ mode = mode.replace("t", "")
+ return bz2.BZ2File(fn, mode)
def _python2_gzipopen(fn, mode, encoding, newline):
""" Wrapper to open gzip in text mode.
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 5880ea154..62e9b6d50 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -82,6 +82,11 @@ def ediff1d(ary, to_end=None, to_begin=None):
# force a 1d array
ary = np.asanyarray(ary).ravel()
+ # we have unit tests enforcing
+ # propagation of the dtype of input
+ # ary to returned result
+ dtype_req = ary.dtype
+
# fast track default case
if to_begin is None and to_end is None:
return ary[1:] - ary[:-1]
@@ -89,13 +94,23 @@ def ediff1d(ary, to_end=None, to_begin=None):
if to_begin is None:
l_begin = 0
else:
- to_begin = np.asanyarray(to_begin).ravel()
+ to_begin = np.asanyarray(to_begin)
+ if not np.can_cast(to_begin, dtype_req):
+ raise TypeError("dtype of to_begin must be compatible "
+ "with input ary")
+
+ to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
- to_end = np.asanyarray(to_end).ravel()
+ to_end = np.asanyarray(to_end)
+ if not np.can_cast(to_end, dtype_req):
+ raise TypeError("dtype of to_end must be compatible "
+ "with input ary")
+
+ to_end = to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
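A minimal usage sketch (not part of the patch) of the casting rule the two
can_cast checks above enforce; the values mirror the new ediff1d tests further
down in this diff:

    import numpy as np

    ary = np.array([1, 2, 3], dtype=np.int16)

    # A scalar 0 can be cast safely to int16, so the input dtype is kept.
    print(np.ediff1d(ary, to_begin=0))        # [0 1 1], dtype int16

    # np.nan cannot be cast to an integer dtype, so this now raises.
    try:
        np.ediff1d(ary, to_end=np.nan)
    except TypeError as exc:
        print(exc)                            # dtype of to_end must be compatible ...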
@@ -312,12 +327,12 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
return_indices : bool
- If True, the indices which correspond to the intersection of the
- two arrays are returned. The first instance of a value is used
- if there are multiple. Default is False.
-
- .. versionadded:: 1.15.0
-
+ If True, the indices which correspond to the intersection of the two
+ arrays are returned. The first instance of a value is used if there are
+ multiple. Default is False.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
intersect1d : ndarray
@@ -326,7 +341,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
The indices of the first occurrences of the common values in `ar1`.
Only provided if `return_indices` is True.
comm2 : ndarray
- The indices of the first occurrences of the common values in `ar2`.
+ The indices of the first occurrences of the common values in `ar2`.
Only provided if `return_indices` is True.
@@ -345,7 +360,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
-
+
To return the indices of the values common to the input arrays
along with the intersected values:
>>> x = np.array([1, 1, 2, 3, 4])
@@ -355,8 +370,11 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(array([0, 2, 4]), array([1, 0, 2]))
>>> xy, x[x_ind], y[y_ind]
(array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
-
+
"""
+ ar1 = np.asanyarray(ar1)
+ ar2 = np.asanyarray(ar2)
+
if not assume_unique:
if return_indices:
ar1, ind1 = unique(ar1, return_index=True)
@@ -367,7 +385,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
else:
ar1 = ar1.ravel()
ar2 = ar2.ravel()
-
+
aux = np.concatenate((ar1, ar2))
if return_indices:
aux_sort_indices = np.argsort(aux, kind='mergesort')
@@ -389,6 +407,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
else:
return int1d
+
def setxor1d(ar1, ar2, assume_unique=False):
"""
Find the set exclusive-or of two arrays.
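Because intersect1d now runs its inputs through np.asanyarray (see the change
above), plain array-likes work directly. A small sketch with a hypothetical
ArrayLike class, mirroring the new test_intersect1d_array_like test:

    import numpy as np

    class ArrayLike(object):
        # any object exposing __array__ is accepted
        def __array__(self):
            return np.arange(3)

    print(np.intersect1d(ArrayLike(), ArrayLike()))   # [0 1 2]
    print(np.intersect1d([1, 2, 3], [2, 3, 4]))       # [2 3]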
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 1a43da8b0..2992e92bb 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -305,12 +305,17 @@ def average(a, axis=None, weights=None, returned=False):
Returns
-------
- average, [sum_of_weights] : array_type or double
- Return the average along the specified axis. When returned is `True`,
+ retval, [sum_of_weights] : array_type or double
+ Return the average along the specified axis. When `returned` is `True`,
return a tuple with the average as the first element and the sum
- of the weights as the second element. The return type is `Float`
- if `a` is of integer type, otherwise it is of the same type as `a`.
- `sum_of_weights` is of the same type as `average`.
+ of the weights as the second element. `sum_of_weights` is of the
+ same type as `retval`. The result dtype follows a general pattern.
+ If `weights` is None, the result dtype will be that of `a`, or ``float64``
+ if `a` is integral. Otherwise, if `weights` is not None and `a` is non-
+ integral, the result type will be the type of lowest precision capable of
+ representing values of both `a` and `weights`. If `a` happens to be
+ integral, the previous rules still apply but the result dtype will
+ at least be ``float64``.
Raises
------
@@ -327,6 +332,8 @@ def average(a, axis=None, weights=None, returned=False):
ma.average : average for masked arrays -- useful if your data contains
"missing" values
+ numpy.result_type : Returns the type that results from applying the
+ numpy type promotion rules to the arguments.
Examples
--------
@@ -346,10 +353,16 @@ def average(a, axis=None, weights=None, returned=False):
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
+
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
-
+
+ >>> a = np.ones(5, dtype=np.float128)
+ >>> w = np.ones(5, dtype=np.complex64)
+ >>> avg = np.average(a, weights=w)
+ >>> print(avg.dtype)
+ complex256
"""
a = np.asanyarray(a)
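A sketch of the promotion rules the reworded Returns section describes, using
np.result_type as the docstring now suggests (dtypes chosen for illustration
only):

    import numpy as np

    # No weights: an integral input is averaged in float64.
    print(np.average(np.arange(5)).dtype)          # float64

    # With weights: promote a and weights, but never below float64
    # when a is integral.
    a = np.arange(5, dtype=np.int32)
    w = np.linspace(1, 2, 5, dtype=np.float32)
    print(np.result_type(a, w, np.float64))        # float64
    print(np.average(a, weights=w).dtype)          # float64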
@@ -1309,7 +1322,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
return interp_func(x, xp, fp, left, right)
-def angle(z, deg=0):
+def angle(z, deg=False):
"""
Return the angle of the complex argument.
@@ -1325,6 +1338,9 @@ def angle(z, deg=0):
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
+
+ .. versionchanged:: 1.16.0
+ This function works on subclasses of ndarray like `ma.array`.
See Also
--------
@@ -1339,18 +1355,18 @@ def angle(z, deg=0):
45.0
"""
- if deg:
- fact = 180/pi
- else:
- fact = 1.0
- z = asarray(z)
- if (issubclass(z.dtype.type, _nx.complexfloating)):
+ z = asanyarray(z)
+ if issubclass(z.dtype.type, _nx.complexfloating):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
- return arctan2(zimag, zreal) * fact
+
+ a = arctan2(zimag, zreal)
+ if deg:
+ a *= 180/pi
+ return a
def unwrap(p, discont=pi, axis=-1):
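A short sketch of the subclass behaviour the rewritten angle() gains (the
masked-array example mirrors the new TestAngle.test_subclass test):

    import numpy as np

    x = np.ma.array([1 + 1j, 1j, -1], mask=[False, True, False])
    out = np.angle(x, deg=True)

    print(type(out).__name__)   # MaskedArray -- asanyarray keeps the subclass
    print(out)                  # [45.0 -- 180.0], the mask is preserved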
@@ -1766,8 +1782,8 @@ class vectorize(object):
Generalized function class.
Define a vectorized function which takes a nested sequence of objects or
- numpy arrays as inputs and returns an single or tuple of numpy array as
- output. The vectorized function evaluates `pyfunc` over successive tuples
+ numpy arrays as inputs and returns a single numpy array or a tuple of numpy
+ arrays. The vectorized function evaluates `pyfunc` over successive tuples
of the input arrays like the python map function, except it uses the
broadcasting rules of numpy.
@@ -3989,11 +4005,13 @@ def meshgrid(*xi, **kwargs):
`meshgrid` is very useful to evaluate functions on a grid.
+ >>> import matplotlib.pyplot as plt
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
+ >>> plt.show()
"""
ndim = len(xi)
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 422b356f7..f03f30fb0 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -260,6 +260,32 @@ def _get_outer_edges(a, range):
return first_edge, last_edge
+def _unsigned_subtract(a, b):
+ """
+ Subtract two values where a >= b, and produce an unsigned result
+
+ This is needed when finding the difference between the upper and lower
+ bound of an int16 histogram
+ """
+ # coerce to a single type
+ signed_to_unsigned = {
+ np.byte: np.ubyte,
+ np.short: np.ushort,
+ np.intc: np.uintc,
+ np.int_: np.uint,
+ np.longlong: np.ulonglong
+ }
+ dt = np.result_type(a, b)
+ try:
+ dt = signed_to_unsigned[dt.type]
+ except KeyError:
+ return np.subtract(a, b, dtype=dt)
+ else:
+ # we know the inputs are integers, and we are deliberately casting
+ # signed to unsigned
+ return np.subtract(a, b, casting='unsafe', dtype=dt)
+
+
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
@@ -311,7 +337,7 @@ def _get_bin_edges(a, bins, range, weights):
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a)
if width:
- n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
+ n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
@@ -703,7 +729,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
- norm = n_equal_bins / (last_edge - first_edge)
+ norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
@@ -731,7 +757,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
- f_indices = (tmp_a - first_edge) * norm
+ f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
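A sketch of the int16 overflow that _unsigned_subtract works around; the
values are arbitrary but the pattern matches the new do_signed_overflow_bounds
tests:

    import numpy as np

    lo = np.array([-32000], dtype=np.int16)
    hi = np.array([32000], dtype=np.int16)

    # Subtracting in the signed input dtype wraps around.
    print(hi - lo)                                                   # [-1536]

    # Re-doing the subtraction in the matching unsigned dtype keeps the
    # true range, which is what the helper above does internally.
    print(np.subtract(hi, lo, dtype=np.uint16, casting='unsafe'))    # [64000]

    # histogram() now survives extreme signed bounds:
    hist, edges = np.histogram(np.array([-32000, 32000], dtype=np.int16), bins=2)
    print(hist)                                                      # [1 1]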
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index abd2da1a2..8d6b0f139 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -1178,13 +1178,15 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 7788ac319..73cf5554a 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -13,6 +13,7 @@ import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
+from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
@@ -379,16 +380,6 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
memmap([4, 5, 6])
"""
- own_fid = False
- if isinstance(file, basestring):
- fid = open(file, "rb")
- own_fid = True
- elif is_pathlib_path(file):
- fid = file.open("rb")
- own_fid = True
- else:
- fid = file
-
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
@@ -409,21 +400,33 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
# Nothing to do on Python 2
pickle_kwargs = {}
+ # TODO: Use contextlib.ExitStack once we drop Python 2
+ if isinstance(file, basestring):
+ fid = open(file, "rb")
+ own_fid = True
+ elif is_pathlib_path(file):
+ fid = file.open("rb")
+ own_fid = True
+ else:
+ fid = file
+ own_fid = False
+
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
+ _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
- if magic.startswith(_ZIP_PREFIX):
+ if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
- tmp = own_fid
+ ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
own_fid = False
- return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
+ return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
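The extra _ZIP_SUFFIX check above is what makes argument-less savez archives
loadable again (gh-9989); a small round-trip sketch using a temporary path:

    import os, tempfile
    import numpy as np

    fname = os.path.join(tempfile.mkdtemp(), "nothing.npz")
    np.savez(fname)                  # an empty zip starts with b'PK\x05\x06'
    with np.load(fname) as data:
        print(sorted(data.files))    # []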
@@ -943,7 +946,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
- def flatten_dtype_internal(dt):
+ @recursive
+ def flatten_dtype_internal(self, dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
@@ -963,7 +967,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
- flat_dt, flat_packing = flatten_dtype_internal(tp)
+ flat_dt, flat_packing = self(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
@@ -972,7 +976,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
packing.append((len(flat_dt), flat_packing))
return (types, packing)
- def pack_items(items, packing):
+ @recursive
+ def pack_items(self, items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
@@ -984,7 +989,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
start = 0
ret = []
for length, subpacking in packing:
- ret.append(pack_items(items[start:start+length], subpacking))
+ ret.append(self(items[start:start+length], subpacking))
start += length
return tuple(ret)
@@ -1110,11 +1115,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
finally:
if fown:
fh.close()
- # recursive closures have a cyclic reference to themselves, which
- # requires gc to collect (gh-10620). To avoid this problem, for
- # performance and PyPy friendliness, we break the cycle:
- flatten_dtype_internal = None
- pack_items = None
if X is None:
X = np.array([], dtype)
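The @recursive decorator imported from numpy.core._internal lets the nested
closures above call themselves through an explicit self argument, so the
manual cycle-breaking (flatten_dtype_internal = None, etc.) is no longer
needed. One way such a decorator can be written, inferred from the usage in
this diff (the real helper may differ in detail):

    class recursive(object):
        def __init__(self, func):
            self.func = func

        def __call__(self, *args, **kwargs):
            # pass the wrapper itself in as `self`, so the wrapped function
            # never references its own name and no reference cycle forms
            return self.func(self, *args, **kwargs)

    @recursive
    def countdown(self, n):
        """Toy example: build [n, n-1, ..., 0] via self-calls."""
        return [n] if n == 0 else [n] + self(n - 1)

    print(countdown(3))    # [3, 2, 1, 0]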
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 0e691f56e..9f3b84732 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -396,7 +396,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
- the squared error.
+ the squared error in the order `deg`, `deg-1`, ... `0`.
+
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
+ method is recommended for new code as it is more stable numerically. See
+ the documentation of the method for more information.
Parameters
----------
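For context on the recommendation added above, a brief sketch of the two
interfaces (note the opposite coefficient ordering; the data is an exact
quadratic, so both fits recover it up to rounding):

    import numpy as np
    from numpy.polynomial import Polynomial

    x = np.linspace(0, 1, 20)
    y = 1.0 + 2.0 * x + 3.0 * x ** 2

    # polyfit: highest degree first
    print(np.polyfit(x, y, 2).round(6))          # [3. 2. 1.]

    # Polynomial.fit works in a scaled domain for stability; convert()
    # maps back, with coefficients lowest degree first
    p = Polynomial.fit(x, y, 2)
    print(p.convert().coef.round(6))             # [1. 2. 3.]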
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 65104115a..66f534734 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -536,7 +536,11 @@ def expand_dims(a, axis):
True
"""
- a = asarray(a)
+ if isinstance(a, matrix):
+ a = asarray(a)
+ else:
+ a = asanyarray(a)
+
shape = a.shape
if axis > a.ndim or axis < -a.ndim - 1:
# 2017-05-17, 1.13.0
@@ -684,7 +688,7 @@ def array_split(ary, indices_or_sections, axis=0):
except AttributeError:
Ntotal = len(ary)
try:
- # handle scalar case.
+ # handle array case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
@@ -696,7 +700,7 @@ def array_split(ary, indices_or_sections, axis=0):
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
- div_points = _nx.array(section_sizes).cumsum()
+ div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
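A sketch of the subclass behaviour the asanyarray branch above enables,
mirroring the new TestExpandDims.test_subclasses test:

    import numpy as np

    m = np.ma.array([1, 2, 3], mask=[False, True, False])
    e = np.expand_dims(m, axis=0)

    print(type(e).__name__)          # MaskedArray
    print(e.shape, e.mask.shape)     # (1, 3) (1, 3)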
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index bc5993802..ca13738c1 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -242,7 +242,7 @@ def broadcast_arrays(*args, **kwargs):
subok = kwargs.pop('subok', False)
if kwargs:
raise TypeError('broadcast_arrays() got an unexpected keyword '
- 'argument {!r}'.format(kwargs.keys()[0]))
+ 'argument {!r}'.format(list(kwargs.keys())[0]))
args = [np.array(_m, copy=False, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
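On Python 3, dict.keys() returns a view that cannot be indexed, which is what
the one-line change above fixes; the restored behaviour looks like this:

    import numpy as np

    x = np.arange(3)
    y = np.arange(3)

    # an unknown keyword now reports its own name instead of failing with
    # "'dict_keys' object is not subscriptable"
    try:
        np.broadcast_arrays(x, y, dtype='float64')
    except TypeError as exc:
        print(exc)   # broadcast_arrays() got an unexpected keyword argument 'dtype'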
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 32812990c..1df8bebf6 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -2,11 +2,14 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import pytest
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
-from numpy.testing import assert_, assert_equal, assert_raises, SkipTest
import numpy.lib._datasource as datasource
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_warns
+ )
if sys.version_info[0] >= 3:
import urllib.request as urllib_request
@@ -30,14 +33,14 @@ def urlopen_stub(url, data=None):
old_urlopen = None
-def setup():
+def setup_module():
global old_urlopen
old_urlopen = urllib_request.urlopen
urllib_request.urlopen = urlopen_stub
-def teardown():
+def teardown_module():
urllib_request.urlopen = old_urlopen
# A valid website for more robust testing
@@ -134,7 +137,7 @@ class TestDataSourceOpen(object):
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
- raise SkipTest
+ pytest.skip()
# Test datasource's internal file_opener for Gzip files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
@@ -150,7 +153,7 @@ class TestDataSourceOpen(object):
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
- raise SkipTest
+ pytest.skip()
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
@@ -161,6 +164,24 @@ class TestDataSourceOpen(object):
fp.close()
assert_equal(magic_line, result)
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
+ def test_Bz2File_text_mode_warning(self):
+ try:
+ import bz2
+ except ImportError:
+ # We don't have the bz2 capabilities to test.
+ pytest.skip()
+ # Test datasource's internal file_opener for BZip2 files.
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
+ fp = bz2.BZ2File(filepath, 'w')
+ fp.write(magic_line)
+ fp.close()
+ with assert_warns(RuntimeWarning):
+ fp = self.ds.open(filepath, 'rt')
+ result = fp.readline()
+ fp.close()
+ assert_equal(magic_line, result)
+
class TestDataSourceExists(object):
def setup(self):
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 45d624781..e62fccaa0 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -3,8 +3,11 @@
"""
from __future__ import division, absolute_import, print_function
+import pytest
+
import numpy as np
-from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,)
+from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
+ assert_equal)
from numpy.lib import pad
@@ -344,6 +347,20 @@ class TestStatistic(object):
)
assert_array_equal(a, b)
+ @pytest.mark.parametrize("mode", [
+ pytest.param("mean", marks=pytest.mark.xfail(reason="gh-11216")),
+ "median",
+ "minimum",
+ "maximum"
+ ])
+ def test_same_prepend_append(self, mode):
+ """ Test that appended and prepended values are equal """
+ # This test is constructed to trigger floating point rounding errors in
+ # a way that caused gh-11216 for mode=='mean'
+ a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
+ a = np.pad(a, (1, 1), mode)
+ assert_equal(a[0], a[-1])
+
class TestConstant(object):
def test_check_constant(self):
@@ -502,6 +519,21 @@ class TestConstant(object):
expected = np.full(7, int64_max, dtype=np.int64)
assert_array_equal(test, expected)
+ def test_check_object_array(self):
+ arr = np.empty(1, dtype=object)
+ obj_a = object()
+ arr[0] = obj_a
+ obj_b = object()
+ obj_c = object()
+ arr = np.pad(arr, pad_width=1, mode='constant',
+ constant_values=(obj_b, obj_c))
+
+ expected = np.empty((3,), dtype=object)
+ expected[0] = obj_b
+ expected[1] = obj_a
+ expected[2] = obj_c
+
+ assert_array_equal(arr, expected)
class TestLinearRamp(object):
def test_check_simple(self):
@@ -542,6 +574,25 @@ class TestLinearRamp(object):
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_allclose(test, expected)
+ @pytest.mark.xfail(exceptions=(AssertionError,))
+ def test_object_array(self):
+ from fractions import Fraction
+ arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
+ actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)
+
+ # deliberately chosen to have a non-power-of-2 denominator such that
+ # rounding to floats causes a failure.
+ expected = np.array([
+ Fraction( 0, 12),
+ Fraction( 3, 12),
+ Fraction( 6, 12),
+ Fraction(-6, 12),
+ Fraction(-4, 12),
+ Fraction(-2, 12),
+ Fraction(-0, 12),
+ ])
+ assert_equal(actual, expected)
+
class TestReflect(object):
def test_check_simple(self):
@@ -887,6 +938,11 @@ class TestWrap(object):
b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
assert_array_equal(a, b)
+ def test_pad_with_zero(self):
+ a = np.ones((3, 5))
+ b = np.pad(a, (0, 5), mode="wrap")
+ assert_array_equal(a, b[:-5, :-5])
+
class TestStatLen(object):
def test_check_simple(self):
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index dace5ade8..4b61726d2 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -6,10 +6,13 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import sys
-from numpy.testing import assert_array_equal, assert_equal, assert_raises
+from numpy.testing import (assert_array_equal, assert_equal,
+ assert_raises, assert_raises_regex)
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
+import pytest
+
class TestSetOps(object):
@@ -30,19 +33,30 @@ class TestSetOps(object):
ed = np.array([1, 2, 5])
c = intersect1d(a, b)
assert_array_equal(c, ed)
-
assert_array_equal([], intersect1d([], []))
-
+
+ def test_intersect1d_array_like(self):
+ # See gh-11772
+ class Test(object):
+ def __array__(self):
+ return np.arange(3)
+
+ a = Test()
+ res = intersect1d(a, a)
+ assert_array_equal(res, a)
+ res = intersect1d([1, 2, 3], [1, 2, 3])
+ assert_array_equal(res, [1, 2, 3])
+
def test_intersect1d_indices(self):
# unique inputs
- a = np.array([1, 2, 3, 4])
+ a = np.array([1, 2, 3, 4])
b = np.array([2, 1, 4, 6])
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
ee = np.array([1, 2, 4])
assert_array_equal(c, ee)
assert_array_equal(a[i1], ee)
assert_array_equal(b[i2], ee)
-
+
# non-unique inputs
a = np.array([1, 2, 2, 3, 4, 3, 2])
b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
@@ -51,7 +65,7 @@ class TestSetOps(object):
assert_array_equal(c, ef)
assert_array_equal(a[i1], ef)
assert_array_equal(b[i2], ef)
-
+
# non1d, unique inputs
a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
@@ -61,7 +75,7 @@ class TestSetOps(object):
ea = np.array([2, 6, 7, 8])
assert_array_equal(ea, a[ui1])
assert_array_equal(ea, b[ui2])
-
+
# non1d, not assumed to be uniqueinputs
a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
@@ -71,7 +85,7 @@ class TestSetOps(object):
ea = np.array([2, 7, 8])
assert_array_equal(ea, a[ui1])
assert_array_equal(ea, b[ui2])
-
+
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
@@ -114,6 +128,68 @@ class TestSetOps(object):
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
+ @pytest.mark.parametrize("ary, prepend, append", [
+ # should fail because trying to cast
+ # np.nan standard floating point value
+ # into an integer array:
+ (np.array([1, 2, 3], dtype=np.int64),
+ None,
+ np.nan),
+ # should fail because attempting
+ # to downcast to smaller int type:
+ (np.array([1, 2, 3], dtype=np.int32),
+ np.array([5, 7, 2], dtype=np.int64),
+ None),
+ # should fail because attempting to cast
+ # two special floating point values
+ # to integers (on both sides of ary):
+ (np.array([1., 3., 9.], dtype=np.int8),
+ np.nan,
+ np.nan),
+ ])
+ def test_ediff1d_forbidden_type_casts(self, ary, prepend, append):
+ # verify resolution of gh-11490
+
+ # specifically, raise an appropriate
+ # Exception when attempting to append or
+ # prepend with an incompatible type
+ msg = 'must be compatible'
+ with assert_raises_regex(TypeError, msg):
+ ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+
+ @pytest.mark.parametrize("ary,"
+ "prepend,"
+ "append,"
+ "expected", [
+ (np.array([1, 2, 3], dtype=np.int16),
+ 0,
+ None,
+ np.array([0, 1, 1], dtype=np.int16)),
+ (np.array([1, 2, 3], dtype=np.int32),
+ 0,
+ 0,
+ np.array([0, 1, 1, 0], dtype=np.int32)),
+ (np.array([1, 2, 3], dtype=np.int64),
+ 3,
+ -9,
+ np.array([3, 1, 1, -9], dtype=np.int64)),
+ ])
+ def test_ediff1d_scalar_handling(self,
+ ary,
+ prepend,
+ append,
+ expected):
+ # maintain backwards-compatibility
+ # of scalar prepend / append behavior
+ # in ediff1d following fix for gh-11490
+ actual = np.ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+ assert_equal(actual, expected)
+
+
def test_isin(self):
# the tests for in1d cover most of isin's behavior
# if in1d is removed, would need to change those tests to test
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index fd227595a..3185e32ac 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -286,7 +286,8 @@ from io import BytesIO
import numpy as np
from numpy.testing import (
- assert_, assert_array_equal, assert_raises, raises, SkipTest
+ assert_, assert_array_equal, assert_raises, assert_raises_regex,
+ raises
)
from numpy.lib import format
@@ -678,12 +679,9 @@ def test_write_version():
(255, 255),
]
for version in bad_versions:
- try:
+ with assert_raises_regex(ValueError,
+ 'we only support format version.*'):
format.write_array(f, arr, version=version)
- except ValueError:
- pass
- else:
- raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,))
bad_version_magic = [
@@ -809,7 +807,7 @@ def test_bad_header():
def test_large_file_support():
if (sys.platform == 'win32' or sys.platform == 'cygwin'):
- raise SkipTest("Unknown if Windows has sparse filesystems")
+ pytest.skip("Unknown if Windows has sparse filesystems")
# try creating a large sparse file
tf_name = os.path.join(tempdir, 'sparse_file')
try:
@@ -819,7 +817,7 @@ def test_large_file_support():
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
except Exception:
- raise SkipTest("Could not create 5GB large file")
+ pytest.skip("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
f.seek(5368709120)
@@ -841,7 +839,7 @@ def test_large_archive():
try:
a = np.empty((2**30, 2), dtype=np.uint8)
except MemoryError:
- raise SkipTest("Could not create large file")
+ pytest.skip("Could not create large file")
fname = os.path.join(tempdir, "large_archive")
@@ -852,3 +850,10 @@ def test_large_archive():
new_a = np.load(f)["arr"]
assert_(a.shape == new_a.shape)
+
+
+def test_empty_npz():
+ # Test for gh-9989
+ fname = os.path.join(tempdir, "nothing.npz")
+ np.savez(fname)
+ np.load(fname)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index ba5b90e8c..d5faed6ae 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1043,6 +1043,16 @@ class TestAngle(object):
assert_array_almost_equal(y, yo, 11)
assert_array_almost_equal(z, zo, 11)
+ def test_subclass(self):
+ x = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])
+ x[1] = np.ma.masked
+ expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
+ expected[1] = np.ma.masked
+ actual = angle(x)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual.mask, expected.mask)
+ assert_equal(actual, expected)
+
class TestTrimZeros(object):
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index f136b5c81..561f5f938 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -310,6 +310,20 @@ class TestHistogram(object):
assert_equal(d_edge.dtype, dates.dtype)
assert_equal(t_edge.dtype, td)
+ def do_signed_overflow_bounds(self, dtype):
+ exponent = 8 * np.dtype(dtype).itemsize - 1
+ arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
+ hist, e = histogram(arr, bins=2)
+ assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
+ assert_equal(hist, [1, 1])
+
+ def test_signed_overflow_bounds(self):
+ self.do_signed_overflow_bounds(np.byte)
+ self.do_signed_overflow_bounds(np.short)
+ self.do_signed_overflow_bounds(np.intc)
+ self.do_signed_overflow_bounds(np.int_)
+ self.do_signed_overflow_bounds(np.longlong)
+
def do_precision_lower_bound(self, float_small, float_large):
eps = np.finfo(float_large).eps
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 315251daa..7e9c026e4 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -113,7 +113,6 @@ class TestRavelUnravelIndex(object):
assert_(x.flags.writeable)
assert_(y.flags.writeable)
-
def test_0d(self):
# gh-580
x = np.unravel_index(0, ())
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index f58c9e33d..ef08c3f41 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -21,7 +21,7 @@ from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises,
+ assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
)
@@ -348,7 +348,6 @@ class TestSaveTxt(object):
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
-
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
@@ -568,12 +567,12 @@ class LoadTxtBase(object):
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
- def test_compressed_gzip(self):
+ def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
- def test_compressed_gzip(self):
+ def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
def test_encoding(self):
@@ -1455,14 +1454,10 @@ M 33 21.99
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
- try:
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
- except NotImplementedError:
- pass
- else:
- errmsg = "Nested dtype involving objects should be supported."
- raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
@@ -2025,7 +2020,6 @@ M 33 21.99
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
-
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
@@ -2039,8 +2033,8 @@ M 33 21.99
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
- raise SkipTest('Skipping test_utf8_file_nodtype_unicode, '
- 'unable to encode utf8 in preferred encoding')
+ pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
+ 'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
@@ -2418,3 +2412,9 @@ def test_load_refcount():
with assert_no_gc_cycles():
np.load(f)
+
+ f.seek(0)
+ dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+ with assert_no_gc_cycles():
+ x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 7f6fca4a4..9f7c117a2 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,93 +1,79 @@
-'''
->>> p = np.poly1d([1.,2,3])
->>> p
-poly1d([1., 2., 3.])
->>> print(p)
- 2
-1 x + 2 x + 3
->>> q = np.poly1d([3.,2,1])
->>> q
-poly1d([3., 2., 1.])
->>> print(q)
- 2
-3 x + 2 x + 1
->>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j]))
- 3 2
-(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)
->>> print(np.poly1d([-3, -2, -1]))
- 2
--3 x - 2 x - 1
-
->>> p(0)
-3.0
->>> p(5)
-38.0
->>> q(0)
-1.0
->>> q(5)
-86.0
-
->>> p * q
-poly1d([ 3., 8., 14., 8., 3.])
->>> p / q
-(poly1d([0.33333333]), poly1d([1.33333333, 2.66666667]))
->>> p + q
-poly1d([4., 4., 4.])
->>> p - q
-poly1d([-2., 0., 2.])
->>> p ** 4
-poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
-
->>> p(q)
-poly1d([ 9., 12., 16., 8., 6.])
->>> q(p)
-poly1d([ 3., 12., 32., 40., 34.])
-
->>> np.asarray(p)
-array([1., 2., 3.])
->>> len(p)
-2
-
->>> p[0], p[1], p[2], p[3]
-(3.0, 2.0, 1.0, 0)
-
->>> p.integ()
-poly1d([0.33333333, 1. , 3. , 0. ])
->>> p.integ(1)
-poly1d([0.33333333, 1. , 3. , 0. ])
->>> p.integ(5)
-poly1d([0.00039683, 0.00277778, 0.025 , 0. , 0. ,
- 0. , 0. , 0. ])
->>> p.deriv()
-poly1d([2., 2.])
->>> p.deriv(2)
-poly1d([2.])
-
->>> q = np.poly1d([1.,2,3], variable='y')
->>> print(q)
- 2
-1 y + 2 y + 3
->>> q = np.poly1d([1.,2,3], variable='lambda')
->>> print(q)
- 2
-1 lambda + 2 lambda + 3
-
->>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
-(poly1d([ 1., -1.]), poly1d([0.]))
-
-'''
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises, rundocs
+ assert_array_almost_equal, assert_raises
)
-class TestDocs(object):
- def test_doctests(self):
- return rundocs()
+class TestPolynomial(object):
+ def test_poly1d_str_and_repr(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(repr(p), 'poly1d([1., 2., 3.])')
+ assert_equal(str(p),
+ ' 2\n'
+ '1 x + 2 x + 3')
+
+ q = np.poly1d([3., 2, 1])
+ assert_equal(repr(q), 'poly1d([3., 2., 1.])')
+ assert_equal(str(q),
+ ' 2\n'
+ '3 x + 2 x + 1')
+
+ r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
+ assert_equal(str(r),
+ ' 3 2\n'
+ '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')
+
+ assert_equal(str(np.poly1d([-3, -2, -1])),
+ ' 2\n'
+ '-3 x - 2 x - 1')
+
+ def test_poly1d_resolution(self):
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p(0), 3.0)
+ assert_equal(p(5), 38.0)
+ assert_equal(q(0), 1.0)
+ assert_equal(q(5), 86.0)
+
+ def test_poly1d_math(self):
+ # here we use some simple coeffs to make calculations easier
+ p = np.poly1d([1., 2, 4])
+ q = np.poly1d([4., 2, 1])
+ assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
+ assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.]))
+ assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.]))
+
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
+ assert_equal(p + q, np.poly1d([4., 4., 4.]))
+ assert_equal(p - q, np.poly1d([-2., 0., 2.]))
+ assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
+ assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
+ assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
+ assert_equal(p.deriv(), np.poly1d([2., 2.]))
+ assert_equal(p.deriv(2), np.poly1d([2.]))
+ assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
+ (np.poly1d([1., -1.]), np.poly1d([0.])))
+
+ def test_poly1d_misc(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(np.asarray(p), np.array([1., 2., 3.]))
+ assert_equal(len(p), 2)
+ assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0))
+
+ def test_poly1d_variable_arg(self):
+ q = np.poly1d([1., 2, 3], variable='y')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 y + 2 y + 3')
+ q = np.poly1d([1., 2, 3], variable='lambda')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 lambda + 2 lambda + 3')
def test_poly(self):
assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index d4828bc1f..5585a95f9 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -541,12 +541,8 @@ class TestStackArrays(object):
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
- try:
- test = stack_arrays((a, b), autoconvert=False)
- except TypeError:
- pass
- else:
- raise AssertionError
+ with assert_raises(TypeError):
+ stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index c95894f94..6e4cd225d 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -3,6 +3,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import warnings
import functools
+import sys
+import pytest
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
@@ -14,6 +16,9 @@ from numpy.testing import (
)
+IS_64BIT = sys.maxsize > 2**32
+
+
def _add_keepdims(func):
""" hack in keepdims behavior into a function taking an axis """
@functools.wraps(func)
@@ -293,6 +298,15 @@ class TestExpandDims(object):
assert_warns(DeprecationWarning, expand_dims, a, -6)
assert_warns(DeprecationWarning, expand_dims, a, 5)
+ def test_subclasses(self):
+ a = np.arange(10).reshape((2, 5))
+ a = np.ma.array(a, mask=a%3 == 0)
+
+ expanded = np.expand_dims(a, axis=1)
+ assert_(isinstance(expanded, np.ma.MaskedArray))
+ assert_equal(expanded.shape, (2, 1, 5))
+ assert_equal(expanded.mask.shape, (2, 1, 5))
+
class TestArraySplit(object):
def test_integer_0_split(self):
@@ -394,6 +408,15 @@ class TestArraySplit(object):
assert_(a.dtype.type is res[-1].dtype.type)
# perhaps should check higher dimensions
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ def test_integer_split_2D_rows_greater_max_int32(self):
+ a = np.broadcast_to([0], (1 << 32, 2))
+ res = array_split(a, 4)
+ chunk = np.broadcast_to([0], (1 << 30, 2))
+ tgt = [chunk] * 4
+ for i in range(len(tgt)):
+ assert_equal(res[i].shape, tgt[i].shape)
+
def test_index_split_simple(self):
a = np.arange(10)
indices = [1, 5, 7]
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 3c2ca8b87..b2bd7da3e 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -3,7 +3,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
- assert_equal, assert_array_equal, assert_raises, assert_
+ assert_equal, assert_array_equal, assert_raises, assert_,
+ assert_raises_regex
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
@@ -57,6 +58,17 @@ def test_same():
assert_array_equal(x, bx)
assert_array_equal(y, by)
+def test_broadcast_kwargs():
+ # ensure that a TypeError is appropriately raised when
+ # np.broadcast_arrays() is called with any keyword
+ # argument other than 'subok'
+ x = np.arange(10)
+ y = np.arange(10)
+
+ with assert_raises_regex(TypeError,
+ r'broadcast_arrays\(\) got an unexpected keyword*'):
+ broadcast_arrays(x, y, dtype='float64')
+
def test_one_off():
x = np.array([[1, 2, 3]])
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 5604b3744..0f06876a1 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -4,8 +4,8 @@ import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, assert_warns
- )
+ assert_, assert_equal, assert_array_equal, assert_warns, assert_raises
+)
class TestUfunclike(object):
@@ -21,6 +21,10 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
+ a = a.astype(np.complex)
+ with assert_raises(TypeError):
+ ufl.isposinf(a)
+
def test_isneginf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
@@ -32,6 +36,10 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
+ a = a.astype(np.complex)
+ with assert_raises(TypeError):
+ ufl.isneginf(a)
+
def test_fix(self):
a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
out = nx.zeros(a.shape, float)
@@ -52,7 +60,8 @@ class TestUfunclike(object):
return res
def __array_wrap__(self, obj, context=None):
- obj.metadata = self.metadata
+ if isinstance(obj, MyArray):
+ obj.metadata = self.metadata
return obj
def __array_finalize__(self, obj):
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 1664e6ebb..3f7aa32fa 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -215,7 +215,7 @@ def iscomplex(x):
if issubclass(ax.dtype.type, _nx.complexfloating):
return ax.imag != 0
res = zeros(ax.shape, bool)
- return +res # convert to array-scalar if needed
+ return res[()] # convert to scalar if needed
def isreal(x):
"""
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index e0bd95182..6259c5445 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -11,6 +11,7 @@ import numpy.core.numeric as nx
import warnings
import functools
+
def _deprecate_out_named_y(f):
"""
Allow the out argument to be passed as the name `y` (deprecated)
@@ -81,6 +82,7 @@ def fix(x, out=None):
res = res[()]
return res
+
@_deprecate_out_named_y
def isposinf(x, out=None):
"""
@@ -116,8 +118,9 @@ def isposinf(x, out=None):
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
- Errors result if the second argument is also supplied when `x` is a
- scalar input, or if first and second arguments have different shapes.
+ Errors result if the second argument is also supplied when x is a scalar
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
Examples
--------
@@ -138,7 +141,14 @@ def isposinf(x, out=None):
array([0, 0, 1])
"""
- return nx.logical_and(nx.isinf(x), ~nx.signbit(x), out)
+ is_inf = nx.isinf(x)
+ try:
+ signbit = ~nx.signbit(x)
+ except TypeError:
+ raise TypeError('This operation is not supported for complex values '
+ 'because it would be ambiguous.')
+ else:
+ return nx.logical_and(is_inf, signbit, out)
@_deprecate_out_named_y
@@ -178,7 +188,8 @@ def isneginf(x, out=None):
(IEEE 754).
Errors result if the second argument is also supplied when x is a scalar
- input, or if first and second arguments have different shapes.
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
Examples
--------
@@ -199,4 +210,11 @@ def isneginf(x, out=None):
array([1, 0, 0])
"""
- return nx.logical_and(nx.isinf(x), nx.signbit(x), out)
+ is_inf = nx.isinf(x)
+ try:
+ signbit = nx.signbit(x)
+ except TypeError:
+ raise TypeError('This operation is not supported for complex values '
+ 'because it would be ambiguous.')
+ else:
+ return nx.logical_and(is_inf, signbit, out)
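Finally, a sketch of the new complex handling in isposinf/isneginf; real input
is unchanged, while complex input now raises the descriptive TypeError added
above:

    import numpy as np

    print(np.isposinf([np.inf, -np.inf, 0.0]))   # [ True False False]

    try:
        np.isneginf(np.array([1 + 1j]))
    except TypeError as exc:
        print(exc)   # This operation is not supported for complex values ...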