Diffstat (limited to 'numpy/linalg/linalg.py'):
 numpy/linalg/linalg.py | 169
 1 file changed, 106 insertions(+), 63 deletions(-)
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 31147b9cc..d2ae7befc 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -19,12 +19,13 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
import warnings
from numpy.core import (
- array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
+ array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
- )
+ finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
+ broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones, matmul,
+ swapaxes, divide, count_nonzero
+)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
@@ -69,12 +70,8 @@ class LinAlgError(Exception):
"""
pass
-# Dealing with errors in _umath_linalg
-
-_linalg_error_extobj = None
def _determine_error_states():
- global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
@@ -82,9 +79,11 @@ def _determine_error_states():
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
- _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
+ return [bufsize, invalid_call_errmask, None]
-_determine_error_states()
+# Dealing with errors in _umath_linalg
+_linalg_error_extobj = _determine_error_states()
+del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
@@ -99,7 +98,7 @@ def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
- extobj = list(_linalg_error_extobj)
+ extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
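# Illustrative sketch (not part of the diff): how the extobj above is assembled.
# Assumes NumPy of this era, where np.geterrobj() returns [bufsize, errmask, callback]
# and np.errstate() temporarily alters the error mask.
import numpy as np

bufsize = np.geterrobj()[0]
with np.errstate(invalid='call', over='ignore', divide='ignore', under='ignore'):
    invalid_call_errmask = np.geterrobj()[1]    # mask captured inside the errstate block
extobj = [bufsize, invalid_call_errmask, None]  # slot 2 is filled with a callback later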
@@ -225,6 +224,22 @@ def _assertNoEmpty2d(*arrays):
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
+def transpose(a):
+ """
+ Transpose each matrix in a stack of matrices.
+
+ Unlike np.transpose, this only swaps the last two axes, rather than all of
+ them.
+
+ Parameters
+ ----------
+ a : (...,M,N) array_like
+
+ Returns
+ -------
+ aT : (...,N,M) ndarray
+ """
+ return swapaxes(a, -1, -2)
# Linear equations
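# Usage sketch (not part of the diff): the helper above transposes only the last
# two axes, leaving any leading "stack" axes alone.
import numpy as np

a = np.ones((4, 2, 3))                 # a stack of four 2x3 matrices
aT = np.swapaxes(a, -1, -2)            # what transpose(a) returns
assert aT.shape == (4, 3, 2)           # np.transpose(a) would instead give (3, 2, 4)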
@@ -1281,7 +1296,7 @@ def eigh(a, UPLO='L'):
# Singular value decomposition
-def svd(a, full_matrices=1, compute_uv=1):
+def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
@@ -1489,22 +1504,34 @@ def cond(x, p=None):
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
-def matrix_rank(M, tol=None):
+def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
- Rank of the array is the number of SVD singular values of the array that are
+ Rank of the array is the number of singular values of the array that are
greater than `tol`.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
- tol : {None, float}, optional
- threshold below which SVD values are considered zero. If `tol` is
- None, and ``S`` is an array with singular values for `M`, and
- ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
- set to ``S.max() * max(M.shape) * eps``.
+ tol : (...) array_like, float, optional
+ threshold below which SVD values are considered zero. If `tol` is
+ None, and ``S`` is an array with singular values for `M`, and
+ ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+ set to ``S.max() * max(M.shape) * eps``.
+
+ .. versionchanged:: 1.14
+ Broadcasted against the stack of matrices
+ hermitian : bool, optional
+ If True, `M` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.14
Notes
-----
@@ -1568,10 +1595,15 @@ def matrix_rank(M, tol=None):
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
- S = svd(M, compute_uv=False)
+ if hermitian:
+ S = abs(eigvalsh(M))
+ else:
+ S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
- return (S > tol).sum(axis=-1)
+ else:
+ tol = asarray(tol)[..., newaxis]
+ return count_nonzero(S > tol, axis=-1)
# Generalized inverse
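# Usage sketch (not part of the diff): matrix_rank on a stack, and the new
# hermitian fast path that uses abs(eigvalsh(M)) in place of a full SVD.
import numpy as np

stack = np.stack([np.eye(3), np.zeros((3, 3))])     # two 3x3 matrices
print(np.linalg.matrix_rank(stack))                 # -> [3 0], one rank per matrix

h = np.array([[2.0, 1.0], [1.0, 2.0]])              # symmetric, so hermitian=True is valid
print(np.linalg.matrix_rank(h, hermitian=True))     # -> 2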
@@ -1584,26 +1616,29 @@ def pinv(a, rcond=1e-15 ):
singular-value decomposition (SVD) and including all
*large* singular values.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
- a : (M, N) array_like
- Matrix to be pseudo-inverted.
- rcond : float
- Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero.
+ a : (..., M, N) array_like
+ Matrix or stack of matrices to be pseudo-inverted.
+ rcond : (...) array_like of float
+ Cutoff for small singular values.
+ Singular values smaller (in modulus) than
+ `rcond` * largest_singular_value (again, in modulus)
+ are set to zero. Broadcasts against the stack of matrices.
Returns
-------
- B : (N, M) ndarray
- The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
- is `B`.
+ B : (..., N, M) ndarray
+ The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+ is `B`.
Raises
------
LinAlgError
- If the SVD computation does not converge.
+ If the SVD computation does not converge.
Notes
-----
@@ -1640,20 +1675,20 @@ def pinv(a, rcond=1e-15 ):
"""
a, wrap = _makearray(a)
+ rcond = asarray(rcond)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
- u, s, vt = svd(a, 0)
- m = u.shape[0]
- n = vt.shape[1]
- cutoff = rcond*maximum.reduce(s)
- for i in range(min(n, m)):
- if s[i] > cutoff:
- s[i] = 1./s[i]
- else:
- s[i] = 0.
- res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
+ u, s, vt = svd(a, full_matrices=False)
+
+ # discard small singular values
+ cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
+ large = s > cutoff
+ s = divide(1, s, where=large, out=s)
+ s[~large] = 0
+
+ res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
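# Usage sketch (not part of the diff): with this change pinv broadcasts over
# leading axes, and rcond may itself broadcast against the stack.
import numpy as np

a = np.random.rand(5, 4, 3)                     # a stack of five 4x3 matrices
B = np.linalg.pinv(a)                           # shape (5, 3, 4)
# the defining property A @ A+ @ A == A holds per matrix in the stack:
assert np.allclose(np.matmul(np.matmul(a, B), a), a)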
@@ -1810,7 +1845,7 @@ def det(a):
# Linear Least Squares
-def lstsq(a, b, rcond=-1):
+def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
@@ -1836,6 +1871,13 @@ def lstsq(a, b, rcond=-1):
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
+ .. versionchanged:: 1.14.0
+ If not set, a FutureWarning is given. The previous default
+ of ``-1`` will use the machine precision as the `rcond` parameter;
+ the new default will use the machine precision times ``max(M, N)``.
+ To silence the warning and use the new default, use ``rcond=None``;
+ to keep using the old behavior, use ``rcond=-1``.
+
Returns
-------
x : {(N,), (N, K)} ndarray
@@ -1909,6 +1951,20 @@ def lstsq(a, b, rcond=-1):
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
+ # Determine default rcond value
+ if rcond == "warn":
+ # 2017-08-19, 1.14.0
+ warnings.warn("`rcond` parameter will change to the default of "
+ "machine precision times ``max(M, N)`` where M and N "
+ "are the input matrix dimensions.\n"
+ "To use the future default and silence this warning "
+ "we advise to pass `rcond=None`, to keep using the old, "
+ "explicitly pass `rcond=-1`.",
+ FutureWarning, stacklevel=2)
+ rcond = -1
+ if rcond is None:
+ rcond = finfo(t).eps * ldb
+
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
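# Usage sketch (not part of the diff): the rcond transition handled above.
# Data follows the familiar line-fit example from the lstsq docstring.
import numpy as np

a = np.array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
b = np.array([-1., 0.2, 0.9, 2.1])
x_new, res, rank, sv = np.linalg.lstsq(a, b, rcond=None)  # new default, no FutureWarning
x_old = np.linalg.lstsq(a, b, rcond=-1)[0]                # old cutoff, kept explicitly
# np.linalg.lstsq(a, b) with rcond unset emits the FutureWarning introduced here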
@@ -1968,13 +2024,13 @@ def lstsq(a, b, rcond=-1):
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
- x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
+ x = array(bstar.T[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
- resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum(abs(bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
- resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum((bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
@@ -2004,9 +2060,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
- if row_axis > col_axis:
- row_axis -= 1
- y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
+ y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
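# Equivalence sketch (not part of the diff): moveaxis expresses the old
# double-rollaxis shuffle in a single call for any (row_axis, col_axis) pair.
import numpy as np

x = np.empty((2, 5, 3, 4))
y = np.moveaxis(x, (1, 3), (-2, -1))   # bring row_axis=1 and col_axis=3 to the end
assert y.shape == (2, 3, 5, 4)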
@@ -2177,7 +2231,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
elif not isinstance(axis, tuple):
try:
axis = int(axis)
- except:
+ except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
@@ -2201,18 +2255,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
- if x.dtype.type is longdouble:
- # Convert to a float type, so integer arrays give
- # float results. Don't apply asfarray to longdouble arrays,
- # because it will downcast to float64.
- absx = abs(x)
- else:
- absx = x if isComplexType(x.dtype.type) else asfarray(x)
- if absx.dtype is x.dtype:
- absx = abs(absx)
- else:
- # if the type changed, we can safely overwrite absx
- abs(absx, out=absx)
+ absx = abs(x)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
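# Equivalence sketch (not part of the diff): the simplified vector path above is
# just the textbook p-norm, (sum |x_i|**p) ** (1/p).
import numpy as np

x = np.array([3.0, -4.0, 12.0])
p = 3
absx = np.abs(x)
absx **= p
manual = np.add.reduce(absx) ** (1.0 / p)
assert np.isclose(manual, np.linalg.norm(x, ord=p))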
@@ -2327,7 +2370,7 @@ def multi_dot(arrays):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
- :math:`A_{10x100}, B_{100x5}, C_{5x50}$`.
+ :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
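# Worked sketch (not part of the diff): evaluating the cost() helper above for
# A (10x100), B (100x5), C (5x50).
cost_AB_then_C = 10 * 100 * 5 + 10 * 5 * 50     # (AB)C -> 5000 + 2500  = 7500
cost_A_then_BC = 100 * 5 * 50 + 10 * 100 * 50   # A(BC) -> 25000 + 50000 = 75000
# multi_dot chooses the cheaper order, here (AB)C.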