Diffstat (limited to 'numpy')
-rw-r--r--  numpy/add_newdocs.py | 18
-rw-r--r--  numpy/build_utils/common.py | 5
-rw-r--r--  numpy/core/arrayprint.py | 3
-rw-r--r--  numpy/core/bscript | 7
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 11
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 8
-rw-r--r--  numpy/core/fromnumeric.py | 1
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 12
-rw-r--r--  numpy/core/setup.py | 9
-rw-r--r--  numpy/core/setup_common.py | 5
-rw-r--r--  numpy/core/src/multiarray/common.c | 12
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 22
-rw-r--r--  numpy/core/src/multiarray/flagsobject.c | 56
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 19
-rw-r--r--  numpy/core/src/multiarray/shape.c | 13
-rw-r--r--  numpy/core/tests/test_api.py | 42
-rw-r--r--  numpy/core/tests/test_multiarray.py | 4
-rw-r--r--  numpy/core/tests/test_nditer.py | 14
-rw-r--r--  numpy/core/tests/test_regression.py | 7
-rw-r--r--  numpy/distutils/command/config.py | 2
-rw-r--r--  numpy/distutils/mingw32ccompiler.py | 9
-rw-r--r--  numpy/distutils/misc_util.py | 60
-rw-r--r--  numpy/lib/_iotools.py | 5
-rw-r--r--  numpy/lib/arraypad.py | 4
-rw-r--r--  numpy/lib/format.py | 2
-rw-r--r--  numpy/lib/function_base.py | 2
-rw-r--r--  numpy/lib/index_tricks.py | 3
-rw-r--r--  numpy/lib/npyio.py | 2
-rw-r--r--  numpy/lib/polynomial.py | 2
-rw-r--r--  numpy/lib/shape_base.py | 2
-rw-r--r--  numpy/lib/utils.py | 2
-rw-r--r--  numpy/linalg/linalg.py | 119
-rw-r--r--  numpy/linalg/tests/test_deprecations.py | 24
-rw-r--r--  numpy/linalg/tests/test_linalg.py | 104
-rw-r--r--  numpy/ma/core.py | 7
-rw-r--r--  numpy/ma/extras.py | 2
-rw-r--r--  numpy/matrixlib/defmatrix.py | 4
-rw-r--r--  numpy/polynomial/chebyshev.py | 4
-rw-r--r--  numpy/polynomial/hermite.py | 2
-rw-r--r--  numpy/polynomial/hermite_e.py | 4
-rw-r--r--  numpy/polynomial/laguerre.py | 4
-rw-r--r--  numpy/polynomial/legendre.py | 2
-rw-r--r--  numpy/polynomial/polynomial.py | 2
-rw-r--r--  numpy/polynomial/polyutils.py | 2
-rw-r--r--  numpy/random/mtrand/mtrand.pyx | 1
-rw-r--r--  numpy/testing/utils.py | 2
46 files changed, 485 insertions, 161 deletions
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index f2b7077c7..a534e852c 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -211,7 +211,7 @@ add_newdoc('numpy.core', 'nditer',
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
- order : {'C', 'F', 'A', or 'K'}, optional
+ order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
@@ -1503,7 +1503,7 @@ add_newdoc('numpy.core.multiarray', 'lexsort',
Parameters
----------
- keys : (k,N) array or tuple containing k (N,)-shaped sequences
+ keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
@@ -1598,7 +1598,6 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
Examples
--------
-
Basic examples
>>> np.can_cast(np.int32, np.int64)
@@ -1978,7 +1977,7 @@ add_newdoc('numpy.core', 'einsum',
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
- order : {'C', 'F', 'A', or 'K'}, optional
+ order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
@@ -3047,7 +3046,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
- order : {'C', 'F', 'A', or 'K'}, optional
+ order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
@@ -3083,7 +3082,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
Raises
------
- ComplexWarning :
+ ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
@@ -3110,12 +3109,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
Parameters
----------
- inplace: bool, optional
+ inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
- out: ndarray
+ out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
@@ -5058,7 +5057,6 @@ add_newdoc('numpy.lib._compiled_base', 'add_newdoc_ufunc',
Notes
-----
-
This method allocates memory for new_docstring on
the heap. Technically this creates a mempory leak, since this
memory will not be reclaimed until the end of the program
@@ -5900,7 +5898,6 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
Examples
--------
-
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print dt.fields
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
@@ -6008,7 +6005,6 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('names',
Examples
--------
-
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
diff --git a/numpy/build_utils/common.py b/numpy/build_utils/common.py
index 301d1da7d..50d2f59bc 100644
--- a/numpy/build_utils/common.py
+++ b/numpy/build_utils/common.py
@@ -27,13 +27,14 @@ def pyod(filename):
Parameters
----------
- filename: str
+ filename : str
name of the file to get the dump from.
Returns
-------
- out: seq
+ out : seq
list of lines of od output
+
Note
----
We only implement enough to get the necessary information for long double
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 2675b0ea1..6e53dca00 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -399,7 +399,8 @@ def array2string(a, max_line_width=None, precision=None,
Raises
------
- TypeError : if a callable in `formatter` does not return a string.
+ TypeError
+ if a callable in `formatter` does not return a string.
See Also
--------
diff --git a/numpy/core/bscript b/numpy/core/bscript
index 3d9b84018..3a2bb2340 100644
--- a/numpy/core/bscript
+++ b/numpy/core/bscript
@@ -31,6 +31,7 @@ from setup_common \
MANDATORY_FUNCS, C_ABI_VERSION, C_API_VERSION
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
+NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
NUMPYCONFIG_SYM = []
@@ -39,6 +40,12 @@ if ENABLE_SEPARATE_COMPILATION:
NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1'))
else:
NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', ''))
+
+if NPY_RELAXED_STRIDES_CHECKING:
+ NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', '#define NPY_RELAXED_STRIDES_CHECKING 1'))
+else:
+ NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', ''))
+
NUMPYCONFIG_SYM.append(('VISIBILITY_HIDDEN', '__attribute__((visibility("hidden")))'))
NUMPYCONFIG_SYM.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 6dd3d4876..ebbf63f20 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -96,12 +96,11 @@ class Ufunc(object):
Attributes
----------
-
- nin: number of input arguments
- nout: number of output arguments
- identity: identity element for a two-argument function
- docstring: docstring for the ufunc
- type_descriptions: list of TypeDescription objects
+ nin : number of input arguments
+ nout : number of output arguments
+ identity : identity element for a two-argument function
+ docstring : docstring for the ufunc
+ type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
*type_descriptions):
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index ff947acb8..b990e81ea 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2800,7 +2800,7 @@ add_newdoc('numpy.core.umath', 'signbit',
Parameters
----------
- x: array_like
+ x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
@@ -2831,9 +2831,9 @@ add_newdoc('numpy.core.umath', 'copysign',
Parameters
----------
- x1: array_like
+ x1 : array_like
Values to change the sign of.
- x2: array_like
+ x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
@@ -2896,7 +2896,7 @@ add_newdoc('numpy.core.umath', 'spacing',
Parameters
----------
- x1: array_like
+ x1 : array_like
Values to find the spacing of.
Returns
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 0080c38ce..e7211cede 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -149,7 +149,6 @@ def reshape(a, newshape, order='C'):
Notes
-----
-
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raise if the data is copied,
you should assign the new shape to the shape attribute of the array::
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 05bac8a1f..7cc37bff8 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -753,9 +753,15 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
#define NPY_ARRAY_F_CONTIGUOUS 0x0002
/*
- * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. An N-d
- * array that is C_CONTIGUOUS is also F_CONTIGUOUS if only
- * one axis has a dimension different from one (ie. a 1x3x1 array).
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional
+ * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements
+ * and the array is contiguous if ndarray.squeeze() is contiguous.
+ * I.e. dimensions for which `ndarray.shape[dimension] == 1` are
+ * ignored.
*/
/*
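
As a quick illustration of the flag semantics described in the comment above,
the session below assumes an interpreter built with NPY_RELAXED_STRIDES_CHECKING
enabled; on a default build both expressions evaluate to False instead:

    >>> import numpy as np
    >>> np.array([[[1], [2]]], order='F').flags.c_contiguous
    True
    >>> np.empty((2, 2, 0, 2, 2)).flags.f_contiguous
    True
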
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 9fe141daf..ea20b11d2 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -17,6 +17,9 @@ from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
+# Set to True to enable relaxed strides checking. This (mostly) means
+# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
+NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils force
@@ -435,6 +438,9 @@ def configuration(parent_package='',top_path=None):
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
+ if NPY_RELAXED_STRIDES_CHECKING:
+ moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
+
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
@@ -532,6 +538,9 @@ def configuration(parent_package='',top_path=None):
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
+ if NPY_RELAXED_STRIDES_CHECKING:
+ moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
+
# Check wether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 056cf9ec0..53f3b76e1 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -163,13 +163,14 @@ def pyod(filename):
Parameters
----------
- filename: str
+ filename : str
name of the file to get the dump from.
Returns
-------
- out: seq
+ out : seq
list of lines of od output
+
Note
----
We only implement enough to get the necessary information for long double
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index f27de491f..f0a6a761c 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -628,8 +628,20 @@ _IsAligned(PyArrayObject *ap)
}
ptr = (npy_intp) PyArray_DATA(ap);
aligned = (ptr % alignment) == 0;
+
for (i = 0; i < PyArray_NDIM(ap); i++) {
+#if NPY_RELAXED_STRIDES_CHECKING
+ if (PyArray_DIM(ap, i) > 1) {
+ /* if shape[i] == 1, the stride is never used */
+ aligned &= ((PyArray_STRIDES(ap)[i] % alignment) == 0);
+ }
+ else if (PyArray_DIM(ap, i) == 0) {
+ /* an array with zero elements is always aligned */
+ return 1;
+ }
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
aligned &= ((PyArray_STRIDES(ap)[i] % alignment) == 0);
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
return aligned != 0;
}
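
For reference, a rough Python sketch of the relaxed alignment rule implemented
above; is_aligned is a hypothetical helper written only for illustration, not a
NumPy API, and a non-relaxed build simply checks every stride:

    def is_aligned(arr):
        # Mirrors _IsAligned with NPY_RELAXED_STRIDES_CHECKING enabled.
        alignment = arr.dtype.alignment
        aligned = (arr.ctypes.data % alignment) == 0
        for dim, stride in zip(arr.shape, arr.strides):
            if dim > 1:
                # a stride only matters when the axis is actually stepped over
                aligned = aligned and (stride % alignment) == 0
            elif dim == 0:
                # an array with zero elements is always considered aligned
                return True
        return aligned
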
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index bdf2e6e2b..f366a34b1 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -3560,6 +3560,7 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
int inflag, int *objflags)
{
int i;
+#if NPY_RELAXED_STRIDES_CHECKING
npy_bool not_cf_contig = 0;
npy_bool nod = 0; /* A dim != 1 was found */
@@ -3573,6 +3574,7 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
nod = 1;
}
}
+#endif /* NPY_RELAXED_STRIDES_CHECKING */
/* Only make Fortran strides if not contiguous as well */
if ((inflag & (NPY_ARRAY_F_CONTIGUOUS|NPY_ARRAY_C_CONTIGUOUS)) ==
@@ -3582,11 +3584,21 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
if (dims[i]) {
itemsize *= dims[i];
}
+#if NPY_RELAXED_STRIDES_CHECKING
else {
not_cf_contig = 0;
}
+ if (dims[i] == 1) {
+ /* For testing purpose only */
+ strides[i] = NPY_MAX_INTP;
+ }
+#endif /* NPY_RELAXED_STRIDES_CHECKING */
}
+#if NPY_RELAXED_STRIDES_CHECKING
if (not_cf_contig) {
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if ((nd > 1) && ((strides[0] != strides[nd-1]) || (dims[nd-1] > 1))) {
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
*objflags = ((*objflags)|NPY_ARRAY_F_CONTIGUOUS) &
~NPY_ARRAY_C_CONTIGUOUS;
}
@@ -3600,11 +3612,21 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
if (dims[i]) {
itemsize *= dims[i];
}
+#if NPY_RELAXED_STRIDES_CHECKING
else {
not_cf_contig = 0;
}
+ if (dims[i] == 1) {
+ /* For testing purpose only */
+ strides[i] = NPY_MAX_INTP;
+ }
+#endif /* NPY_RELAXED_STRIDES_CHECKING */
}
+#if NPY_RELAXED_STRIDES_CHECKING
if (not_cf_contig) {
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if ((nd > 1) && ((strides[0] != strides[nd-1]) || (dims[0] > 1))) {
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
*objflags = ((*objflags)|NPY_ARRAY_C_CONTIGUOUS) &
~NPY_ARRAY_F_CONTIGUOUS;
}
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index ef04bdb20..0ad5c908a 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -90,8 +90,33 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
* Check whether the given array is stored contiguously
* in memory. And update the passed in ap flags apropriately.
*
- * A dimension == 1 stride is ignored for contiguous flags and a 0-sized array
- * is always both C- and F-Contiguous. 0-strided arrays are not contiguous.
+ * The traditional rule is that for an array to be flagged as C contiguous,
+ * the following must hold:
+ *
+ * strides[-1] == itemsize
+ * strides[i] == shape[i+1] * strides[i + 1]
+ *
+ * And for an array to be flagged as F contiguous, the obvious reversal:
+ *
+ * strides[0] == itemsize
+ * strides[i] == shape[i - 1] * strides[i - 1]
+ *
+ * According to these rules, a 0- or 1-dimensional array is either both
+ * C- and F-contiguous, or neither; and an array with 2+ dimensions
+ * can be C- or F-contiguous, or neither, but not both. There are
+ * exceptions for arrays with zero or one item: in the first case
+ * the check is relaxed up to and including the first dimension
+ * with shape[i] == 0; in the second case `strides == itemsize`
+ * can be true for all dimensions and both flags are set.
+ *
+ * When NPY_RELAXED_STRIDES_CHECKING is set, we use a more accurate
+ * definition of C- and F-contiguity, in which all 0-sized arrays are
+ * contiguous (regardless of dimensionality), and if shape[i] == 1
+ * then we ignore strides[i] (since it has no effect on memory layout).
+ * With these new rules, it is possible for e.g. a 10x1 array to be both
+ * C- and F-contiguous -- but they break downstream code which assumes
+ * that for contiguous arrays strides[-1] (resp. strides[0]) always
+ * contains the itemsize.
*/
static void
_UpdateContiguousFlags(PyArrayObject *ap)
@@ -101,9 +126,10 @@ _UpdateContiguousFlags(PyArrayObject *ap)
int i;
npy_bool is_c_contig = 1;
- sd = PyArray_DESCR(ap)->elsize;
+ sd = PyArray_ITEMSIZE(ap);
for (i = PyArray_NDIM(ap) - 1; i >= 0; --i) {
dim = PyArray_DIMS(ap)[i];
+#if NPY_RELAXED_STRIDES_CHECKING
/* contiguous by definition */
if (dim == 0) {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_C_CONTIGUOUS);
@@ -116,6 +142,17 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
sd *= dim;
}
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if (PyArray_STRIDES(ap)[i] != sd) {
+ is_c_contig = 0;
+ break;
+ }
+ /* contiguous, if it got this far */
+ if (dim == 0) {
+ break;
+ }
+ sd *= dim;
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
if (is_c_contig) {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_C_CONTIGUOUS);
@@ -125,9 +162,10 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
/* check if fortran contiguous */
- sd = PyArray_DESCR(ap)->elsize;
+ sd = PyArray_ITEMSIZE(ap);
for (i = 0; i < PyArray_NDIM(ap); ++i) {
dim = PyArray_DIMS(ap)[i];
+#if NPY_RELAXED_STRIDES_CHECKING
if (dim != 1) {
if (PyArray_STRIDES(ap)[i] != sd) {
PyArray_CLEARFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
@@ -135,6 +173,16 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
sd *= dim;
}
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if (PyArray_STRIDES(ap)[i] != sd) {
+ PyArray_CLEARFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
+ return;
+ }
+ if (dim == 0) {
+ break;
+ }
+ sd *= dim;
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
return;
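
The contiguity rules spelled out in the comment above can be restated as a
short Python sketch; compute_contiguity is a hypothetical name used only for
illustration (NumPy itself sets the flags in C, as shown in this hunk):

    def compute_contiguity(shape, strides, itemsize, relaxed=True):
        """Return (c_contiguous, f_contiguous) for a given memory layout."""
        def check(dims, strs):
            sd = itemsize
            for dim, stride in zip(dims, strs):
                if relaxed:
                    if dim == 0:
                        return True       # zero-sized arrays are contiguous
                    if dim != 1:          # strides of length-1 axes are ignored
                        if stride != sd:
                            return False
                        sd *= dim
                else:
                    if stride != sd:
                        return False
                    if dim == 0:
                        break
                    sd *= dim
            return True
        # The C check walks the axes from the last one, the F check from the first.
        return check(shape[::-1], strides[::-1]), check(shape, strides)

For example, a C-order (10, 1) float64 array with strides (8, 8) gives
(True, True) with relaxed=True and (True, False) with relaxed=False.
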
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index f8ade57da..dd6d44003 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1510,20 +1510,31 @@ PyArray_EquivTypenums(int typenum1, int typenum2)
}
/*** END C-API FUNCTIONS **/
-
+/*
+ * NPY_RELAXED_STRIDES_CHECKING: If the strides logic is changed, the
+ * order specific stride setting is not necessary.
+ */
static PyObject *
-_prepend_ones(PyArrayObject *arr, int nd, int ndmin)
+_prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order)
{
npy_intp newdims[NPY_MAXDIMS];
npy_intp newstrides[NPY_MAXDIMS];
+ npy_intp newstride;
int i, k, num;
PyArrayObject *ret;
PyArray_Descr *dtype;
+ if (order == NPY_FORTRANORDER || PyArray_ISFORTRAN(arr) || PyArray_NDIM(arr) == 0) {
+ newstride = PyArray_DESCR(arr)->elsize;
+ }
+ else {
+ newstride = PyArray_STRIDES(arr)[0] * PyArray_DIMS(arr)[0];
+ }
+
num = ndmin - nd;
for (i = 0; i < num; i++) {
newdims[i] = 1;
- newstrides[i] = PyArray_DESCR(arr)->elsize;
+ newstrides[i] = newstride;
}
for (i = num; i < ndmin; i++) {
k = i - num;
@@ -1664,7 +1675,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
* create a new array from the same data with ones in the shape
* steals a reference to ret
*/
- return _prepend_ones(ret, nd, ndmin);
+ return _prepend_ones(ret, nd, ndmin, order);
clean_type:
Py_XDECREF(type);
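
The stride chosen here for the prepended length-1 axes is what keeps ndmin
results contiguous under the stricter flag rules; a small example of the
intended behaviour (expected output, based on the tests added in this patch):

    >>> a = np.arange(6.).reshape(2, 3)      # C-contiguous, strides (24, 8)
    >>> b = np.array(a, copy=False, ndmin=4)
    >>> b.shape
    (1, 1, 2, 3)
    >>> b.flags.c_contiguous
    True
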
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 4223e49f6..67ee4b04b 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -214,9 +214,11 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims,
* in order to get the right orientation and
* because we can't just re-use the buffer with the
* data in the order it is in.
+ * NPY_RELAXED_STRIDES_CHECKING: size check is unnecessary when set.
*/
- if ((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
- (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self))) {
+ if ((PyArray_SIZE(self) > 1) &&
+ ((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
+ (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self)))) {
int success = 0;
success = _attempt_nocopy_reshape(self, ndim, dimensions,
newstrides, order);
@@ -1102,7 +1104,9 @@ build_shape_string(npy_intp n, npy_intp *vals)
* the array will point to invalid memory. The caller must
* validate this!
* If an axis flagged for removal has a shape larger then one,
- * the arrays contiguous flags may require updating.
+ * the aligned flag (and in the future the contiguous flags)
+ * may need an explicit update.
+ * (check also NPY_RELAXED_STRIDES_CHECKING)
*
* For example, this can be used to remove the reduction axes
* from a reduction result once its computation is complete.
@@ -1125,4 +1129,7 @@ PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
/* The final number of dimensions */
fa->nd = idim_out;
+
+ /* May not be necessary for NPY_RELAXED_STRIDES_CHECKING (see comment) */
+ PyArray_UpdateFlags(arr, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
}
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 92e7d936c..d93d6477a 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -7,6 +7,9 @@ from numpy.testing import *
from numpy.testing.utils import WarningManager
import warnings
+# Switch to the new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
+NPY_RELAXED_STRIDES_CHECKING = np.ones((10,1), order='C').flags.f_contiguous
+
def test_fastCopyAndTranspose():
# 0D array
a = np.array(2)
@@ -149,10 +152,13 @@ def test_copy_order():
assert_equal(x, y)
assert_equal(res.flags.c_contiguous, ccontig)
assert_equal(res.flags.f_contiguous, fcontig)
- if strides:
- assert_equal(x.strides, y.strides)
- else:
- assert_(x.strides != y.strides)
+ # This check is only impossible because NPY_RELAXED_STRIDES_CHECKING
+ # actively changes the strides.
+ if not NPY_RELAXED_STRIDES_CHECKING:
+ if strides:
+ assert_equal(x.strides, y.strides)
+ else:
+ assert_(x.strides != y.strides)
# Validate the initial state of a, b, and c
assert_(a.flags.c_contiguous)
@@ -206,7 +212,8 @@ def test_copy_order():
def test_contiguous_flags():
a = np.ones((4,4,1))[::2,:,:]
- a.strides = a.strides[:2] + (-123,)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ a.strides = a.strides[:2] + (-123,)
b = np.ones((2,2,1,2,2)).swapaxes(3,4)
def check_contig(a, ccontig, fcontig):
@@ -216,8 +223,12 @@ def test_contiguous_flags():
# Check if new arrays are correct:
check_contig(a, False, False)
check_contig(b, False, False)
- check_contig(np.empty((2,2,0,2,2)), True, True)
- check_contig(np.array([[[1],[2]]], order='F'), True, True)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ check_contig(np.empty((2,2,0,2,2)), True, True)
+ check_contig(np.array([[[1],[2]]], order='F'), True, True)
+ else:
+ check_contig(np.empty((2,2,0,2,2)), True, False)
+ check_contig(np.array([[[1],[2]]], order='F'), False, True)
check_contig(np.empty((2,2)), True, False)
check_contig(np.empty((2,2), order='F'), False, True)
@@ -226,11 +237,18 @@ def test_contiguous_flags():
check_contig(np.array(a, copy=False, order='C'), True, False)
check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
- # Check slicing update of flags and :
- check_contig(a[0], True, True)
- check_contig(a[None,::4,...,None], True, True)
- check_contig(b[0,0,...], False, True)
- check_contig(b[:,:,0:0,:,:], True, True)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ # Check slicing update of flags:
+ check_contig(a[0], True, True)
+ check_contig(a[None,::4,...,None], True, True)
+ check_contig(b[0,0,...], False, True)
+ check_contig(b[:,:,0:0,:,:], True, True)
+ else:
+ # Check slicing update of flags:
+ check_contig(a[0], True, False)
+ # Would be nice if this was C-Contiguous:
+ check_contig(a[None,0,...,None], False, False)
+ check_contig(b[0,0,0,...], False, True)
# Test ravel and squeeze.
check_contig(a.ravel(), True, True)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index f4f51b414..bca255f21 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2764,7 +2764,9 @@ if sys.version_info >= (2, 6):
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:^q:dx:B:e:@H:f:=I:g:L:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:^q:dx:B:e:@H:f:=I:g:Q:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
- assert_equal(y.strides, (sz,))
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
+ if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
+ assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index ec5f63c5e..39b0c13ec 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2029,38 +2029,38 @@ def test_iter_buffered_reduce_reuse():
op_dtypes = [np.float, a.dtype]
def get_params():
- for xs in xrange(-3**2, 3**2 + 1):
- for ys in xrange(xs, 3**2 + 1):
+ for xs in range(-3**2, 3**2 + 1):
+ for ys in range(xs, 3**2 + 1):
for op_axes in op_axes_list:
# last stride is reduced and because of that not
# important for this test, as it is the inner stride.
strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
arr = np.lib.stride_tricks.as_strided(a, (3,3,3), strides)
-
+
for skip in [0, 1]:
yield arr, op_axes, skip
-
+
for arr, op_axes, skip in get_params():
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
nditer2.operands[-1][...] = 0
nditer2.reset()
- nditer2.iterindex = skip
+ nditer2.iterindex = skip
for (a2_in, b2_in) in nditer2:
b2_in += a2_in.astype(np.int_)
comp_res = nditer2.operands[-1]
- for bufsize in xrange(0, 3**3):
+ for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
nditer1.operands[-1][...] = 0
nditer1.reset()
nditer1.iterindex = skip
-
+
for (a1_in, b1_in) in nditer1:
b1_in += a1_in.astype(np.int_)
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 575fb381c..8d3b35bb9 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -543,6 +543,9 @@ class TestRegression(TestCase):
a = np.ones((0,2))
a.shape = (-1,2)
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
+ @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# Github issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
@@ -794,6 +797,10 @@ class TestRegression(TestCase):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
+ # 0-sized reshape itself is tested elsewhere.
+ @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index 055fd26a9..470ca07c9 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -340,7 +340,7 @@ int main ()
Arguments
---------
- funcs: seq
+ funcs : seq
list of functions to test
include_dirs : seq
list of header paths
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index ed0dc0d4e..c07a58e26 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -512,10 +512,13 @@ def manifest_rc(name, type='dll'):
'exe').
Parameters
- ---------- name: str
+ ----------
+ name : str
name of the manifest file to embed
- type: str ('dll', 'exe')
- type of the binary which will embed the manifest"""
+ type : str {'dll', 'exe'}
+ type of the binary which will embed the manifest
+
+ """
if type == 'dll':
rctype = 2
elif type == 'exe':
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 0c88e0ae4..dea993b20 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -878,14 +878,14 @@ class Configuration(object):
Parameters
----------
- subpackage_name: str,None
+ subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
- subpackage_path: str
+ subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
- parent_name: str
+ parent_name : str
Parent name.
"""
if subpackage_name is None:
@@ -941,13 +941,13 @@ class Configuration(object):
Parameters
----------
- subpackage_name: str
+ subpackage_name : str
name of the subpackage
- subpackage_path: str
+ subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None,the subpackage is
assumed to be located in the local path / subpackage_name.
- standalone: bool
+ standalone : bool
"""
if standalone:
@@ -985,10 +985,10 @@ class Configuration(object):
Parameters
----------
- data_path: seq,str
+ data_path : seq or str
Argument can be either
- * 2-sequence (<datadir suffix>,<path to data directory>)
+ * 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
@@ -1107,7 +1107,7 @@ class Configuration(object):
Parameters
----------
- files: sequence
+ files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
@@ -1286,7 +1286,7 @@ class Configuration(object):
Parameters
----------
- files: str, seq
+ files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
@@ -1341,9 +1341,9 @@ class Configuration(object):
Parameters
----------
- name: str
+ name : str
name of the extension
- sources: seq
+ sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
@@ -1351,28 +1351,28 @@ class Configuration(object):
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
- include_dirs:
- define_macros:
- undef_macros:
- library_dirs:
- libraries:
- runtime_library_dirs:
- extra_objects:
- extra_compile_args:
- extra_link_args:
- extra_f77_compile_args:
- extra_f90_compile_args:
- export_symbols:
- swig_opts:
- depends:
+ include_dirs :
+ define_macros :
+ undef_macros :
+ library_dirs :
+ libraries :
+ runtime_library_dirs :
+ extra_objects :
+ extra_compile_args :
+ extra_link_args :
+ extra_f77_compile_args :
+ extra_f90_compile_args :
+ export_symbols :
+ swig_opts :
+ depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
- language:
- f2py_options:
- module_dirs:
- extra_info: dict,list
+ language :
+ f2py_options :
+ module_dirs :
+ extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index b8a01bafc..f3097be23 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -91,7 +91,8 @@ def has_nested_fields(ndtype):
Raises
------
- AttributeError : If `ndtype` does not have a `names` attribute.
+ AttributeError
+ If `ndtype` does not have a `names` attribute.
Examples
--------
@@ -271,7 +272,7 @@ class NameValidator(object):
* If 'lower', field names are converted to lower case.
The default value is True.
- replace_space: '_', optional
+ replace_space : '_', optional
Character(s) used in replacement of white spaces.
Notes
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index fbf65904a..31bf609f5 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -619,10 +619,10 @@ def pad(array, pad_width, mode=None, **kwargs):
where
- vector: ndarray
+ vector : ndarray
A rank 1 array already padded with zeros. Padded values are
vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
- iaxis_pad_width: tuple
+ iaxis_pad_width : tuple
A 2-tuple of ints, iaxis_pad_width[0] represents the number of
values padded at the beginning of vector where
iaxis_pad_width[1] represents the number of values padded at
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index dedfabfd2..b41588d6b 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -314,7 +314,7 @@ def read_array_header_1_0(fp):
Raises
------
- ValueError :
+ ValueError
If the data is invalid.
"""
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 9ca72bf41..fdcb4977a 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2917,7 +2917,7 @@ def median(a, axis=None, out=None, overwrite_input=False):
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
- overwrite_input : bool optional
+ overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 045166b84..06ec80d23 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -652,7 +652,8 @@ def fill_diagonal(a, val, wrap=False):
Value to be written on the diagonal, its type must be compatible with
that of the array a.
- wrap: bool For tall matrices in NumPy version up to 1.6.2, the
+ wrap : bool
+ For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affect only tall matrices.
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index eb4ffd4ce..2cd375ef9 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -285,7 +285,7 @@ def load(file, mmap_mode=None):
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
- mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
+ mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap` for a detailed description of the modes).
A memory-mapped array is kept on disk. However, it can be accessed
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 2d455e33a..3f5593432 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -169,7 +169,7 @@ def roots(p):
Raises
------
- ValueError :
+ ValueError
When `p` cannot be converted to a rank-1 array.
See also
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 909f6faa8..4f14a0c40 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -681,12 +681,10 @@ def kron(a,b):
See Also
--------
-
outer : The outer product
Notes
-----
-
The function assumes that the number of dimenensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index b81db681e..c4b692a69 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -835,7 +835,7 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
- regenerate: bool
+ regenerate : bool
Re-generate the docstring cache
Returns
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index c66ab8c3a..ba242e7c6 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -16,6 +16,8 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
+import warnings
+
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
@@ -539,7 +541,7 @@ def cholesky(a):
# QR decompostion
-def qr(a, mode='full'):
+def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
@@ -548,24 +550,42 @@ def qr(a, mode='full'):
Parameters
----------
- a : array_like
- Matrix to be factored, of shape (M, N).
- mode : {'full', 'r', 'economic'}, optional
- Specifies the values to be returned. 'full' is the default.
- Economic mode is slightly faster then 'r' mode if only `r` is needed.
+ a : array_like, shape (M, N)
+ Matrix to be factored.
+ mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
+ If K = min(M, N), then
+
+ 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
+ 'complete' : returns q, r with dimensions (M, M), (M, N)
+ 'r' : returns r only with dimensions (K, N)
+ 'raw' : returns h, tau with dimensions (N, M), (K,)
+ 'full' : alias of 'reduced', deprecated
+ 'economic' : returns h from 'raw', deprecated.
+
+ The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
+ see the notes for more information. The default is 'reduced' and to
+ maintain backward compatibility with earlier versions of numpy both
+ it and the old default 'full' can be omitted. Note that array h
+ returned in 'raw' mode is transposed for calling Fortran. The
+ 'economic' mode is deprecated. The modes 'full' and 'economic' may
+ be passed using only the first letter for backwards compatibility,
+ but all others must be spelled out. See the Notes for more
+ explanation.
+
Returns
-------
q : ndarray of float or complex, optional
- The orthonormal matrix, of shape (M, K). Only returned if
- ``mode='full'``.
+ A matrix with orthonormal columns. When mode = 'complete' the
+ result is an orthogonal/unitary matrix depending on whether or not
+ a is real/complex. The determinant may be either +/- 1 in that
+ case.
r : ndarray of float or complex, optional
- The upper-triangular matrix, of shape (K, N) with K = min(M, N).
- Only returned when ``mode='full'`` or ``mode='r'``.
- a2 : ndarray of float or complex, optional
- Array of shape (M, N), only returned when ``mode='economic``'.
- The diagonal and the upper triangle of `a2` contains `r`, while
- the rest of the matrix is undefined.
+ The upper-triangular matrix.
+ (h, tau) : ndarrays of np.double or np.cdouble, optional
+ The array h contains the Householder reflectors that generate q
+ along with r. The tau array contains scaling factors for the
+ reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
@@ -580,8 +600,20 @@ def qr(a, mode='full'):
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
- Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
- all the return values will be matrices too.
+ Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
+ `a` is of type `matrix`, all the return values will be matrices too.
+
+ New 'reduced', 'complete', and 'raw' options for mode were added in
+ Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
+ addition the options 'full' and 'economic' were deprecated. Because
+ 'full' was the previous default and 'reduced' is the new default,
+ backward compatibility can be maintained by letting `mode` default.
+ The 'raw' option was added so that LAPACK routines that can multiply
+ arrays by q using the Householder reflectors can be used. Note that in
+ this case the returned arrays are of type np.double or np.cdouble and
+ the h array is transposed to be FORTRAN compatible. No routines using
+ the 'raw' return are currently exposed by numpy, but some are available
+ in lapack_lite and just await the necessary work.
Examples
--------
@@ -626,6 +658,20 @@ def qr(a, mode='full'):
array([ 1.1e-16, 1.0e+00])
"""
+ if mode not in ('reduced', 'complete', 'r', 'raw'):
+ if mode in ('f', 'full'):
+ msg = "".join((
+ "The 'full' option is deprecated in favor of 'reduced'.\n",
+ "For backward compatibility let mode default."))
+ warnings.warn(msg, DeprecationWarning)
+ mode = 'reduced'
+ elif mode in ('e', 'economic'):
+ msg = "The 'economic' option is deprecated.",
+ warnings.warn(msg, DeprecationWarning)
+ mode = 'economic'
+ else:
+ raise ValueError("Unrecognized mode '%s'" % mode)
+
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
@@ -653,26 +699,30 @@ def qr(a, mode='full'):
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
-
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- # economic mode. Isn't actually economic.
- if mode[0] == 'e':
- if t != result_t :
- a = a.astype(result_t)
- return a.T
+ # handle modes that don't return q
+ if mode == 'r':
+ r = _fastCopyAndTranspose(result_t, a[:,:mn])
+ return wrap(triu(r))
- # generate r
- r = _fastCopyAndTranspose(result_t, a[:,:mn])
- for i in range(mn):
- r[i,:i].fill(0.0)
+ if mode == 'raw':
+ return a, tau
- # 'r'-mode, that is, calculate only r
- if mode[0] == 'r':
- return r
+ if mode == 'economic':
+ if t != result_t :
+ a = a.astype(result_t)
+ return wrap(a.T)
- # from here on: build orthonormal matrix q from a
+ # generate q from a
+ if mode == 'complete' and m > n:
+ mc = m
+ q = empty((m, m), t)
+ else:
+ mc = mn
+ q = empty((n, m), t)
+ q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
@@ -684,20 +734,21 @@ def qr(a, mode='full'):
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
- results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
+ results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
- results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
+ results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- q = _fastCopyAndTranspose(result_t, a[:mn,:])
+ q = _fastCopyAndTranspose(result_t, q[:mc])
+ r = _fastCopyAndTranspose(result_t, a[:,:mc])
- return wrap(q), wrap(r)
+ return wrap(q), wrap(triu(r))
# Eigenvalues
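
To summarize the mode options documented above (a sketch based on the new
docstring), the shapes returned for a 9x6 input, i.e. M=9, N=6, K=6, would be:

    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a, mode='reduced')   # q: (9, 6), r: (6, 6)
    >>> q, r = np.linalg.qr(a, mode='complete')  # q: (9, 9), r: (9, 6)
    >>> r = np.linalg.qr(a, mode='r')            # r alone: (6, 6)
    >>> h, tau = np.linalg.qr(a, mode='raw')     # h: (6, 9), tau: (6,)
    >>> q, r = np.linalg.qr(a)                   # default, same as 'reduced'
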
diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py
new file mode 100644
index 000000000..13d244199
--- /dev/null
+++ b/numpy/linalg/tests/test_deprecations.py
@@ -0,0 +1,24 @@
+"""Test deprecation and future warnings.
+
+"""
+import numpy as np
+from numpy.testing import assert_warns, run_module_suite
+
+
+def test_qr_mode_full_future_warning():
+ """Check mode='full' FutureWarning.
+
+ In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were
+ deprecated. The release date will probably be sometime in the summer
+ of 2013.
+
+ """
+ a = np.eye(2)
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 6750f059d..3f2d438f7 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -469,11 +469,115 @@ def test_reduced_rank():
class TestQR(TestCase):
+
+
+ def check_qr(self, a):
+ # This test expects the argument `a` to be an ndarray or
+ # a subclass of an ndarray of inexact type.
+ a_type = type(a)
+ a_dtype = a.dtype
+ m, n = a.shape
+ k = min(m, n)
+
+ # mode == 'complete'
+ q, r = linalg.qr(a, mode='complete')
+ assert_(q.dtype == a_dtype)
+ assert_(r.dtype == a_dtype)
+ assert_(isinstance(q, a_type))
+ assert_(isinstance(r, a_type))
+ assert_(q.shape == (m, m))
+ assert_(r.shape == (m, n))
+ assert_almost_equal(dot(q, r), a)
+ assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
+ assert_almost_equal(np.triu(r), r)
+
+
+ # mode == 'reduced'
+ q1, r1 = linalg.qr(a, mode='reduced')
+ assert_(q1.dtype == a_dtype)
+ assert_(r1.dtype == a_dtype)
+ assert_(isinstance(q1, a_type))
+ assert_(isinstance(r1, a_type))
+ assert_(q1.shape == (m, k))
+ assert_(r1.shape == (k, n))
+ assert_almost_equal(dot(q1, r1), a)
+ assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
+ assert_almost_equal(np.triu(r1), r1)
+
+ # mode == 'r'
+ r2 = linalg.qr(a, mode='r')
+ assert_(r2.dtype == a_dtype)
+ assert_(isinstance(r2, a_type))
+ assert_almost_equal(r2, r1)
+
+
+
def test_qr_empty(self):
a = np.zeros((0,2))
self.assertRaises(linalg.LinAlgError, linalg.qr, a)
+ def test_mode_raw(self):
+ a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+ b = a.astype(np.single)
+
+ # m > n
+ h1, tau1 = (
+ array([[-5.91607978, 0.43377175, 0.72295291],
+ [-7.43735744, 0.82807867, 0.89262383]]),
+ array([ 1.16903085, 1.113104 ])
+ )
+ # m < n
+ h2, tau2 = (
+ array([[-2.23606798, 0.61803399],
+ [-4.91934955, -0.89442719],
+ [-7.60263112, -1.78885438]]),
+ array([ 1.4472136, 0. ])
+ )
+
+ # Test double
+ h, tau = linalg.qr(a, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ old_assert_almost_equal(h, h1, decimal=8)
+ old_assert_almost_equal(tau, tau1, decimal=8)
+
+ h, tau = linalg.qr(a.T, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ old_assert_almost_equal(h, h2, decimal=8)
+ old_assert_almost_equal(tau, tau2, decimal=8)
+
+ # Test single
+ h, tau = linalg.qr(b, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ old_assert_almost_equal(h, h1, decimal=8)
+ old_assert_almost_equal(tau, tau1, decimal=8)
+
+
+ def test_mode_all_but_economic(self):
+ a = array([[1, 2], [3, 4]])
+ b = array([[1, 2], [3, 4], [5, 6]])
+ for dt in "fd":
+ m1 = a.astype(dt)
+ m2 = b.astype(dt)
+ self.check_qr(m1)
+ self.check_qr(m2)
+ self.check_qr(m2.T)
+ self.check_qr(matrix(m1))
+ for dt in "fd":
+ m1 = 1 + 1j * a.astype(dt)
+ m2 = 1 + 1j * b.astype(dt)
+ self.check_qr(m1)
+ self.check_qr(m2)
+ self.check_qr(m2.T)
+ self.check_qr(matrix(m1))
+
+
+
+
+
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 6a37cf1b6..64cfafe7c 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -630,7 +630,6 @@ def getdata(a, subok=True):
Examples
--------
-
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
@@ -1263,7 +1262,6 @@ def getmask(a):
Examples
--------
-
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
@@ -1325,7 +1323,6 @@ def getmaskarray(arr):
Examples
--------
-
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
@@ -1533,7 +1530,7 @@ def make_mask_none(newshape, dtype=None):
----------
newshape : tuple
A tuple indicating the shape of the mask.
- dtype: {None, dtype}, optional
+ dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
@@ -5653,7 +5650,7 @@ class mvoid(MaskedArray):
Returns
-------
- filled_void:
+ filled_void
A `np.void` object
See Also
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 795e75402..a22152729 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -1296,7 +1296,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
Raises
------
- ValueError:
+ ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index cf47fadf8..4ecce483c 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -588,9 +588,9 @@ class matrix(N.ndarray):
Parameters
----------
- axis: int, optional
+ axis : int, optional
Axis along which logical OR is performed
- out: ndarray, optional
+ out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index dde4966e7..2c2070c62 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -878,7 +878,7 @@ def chebder(c, m=1, scl=1, axis=0) :
Parameters
----------
- c: array_like
+ c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
@@ -1437,7 +1437,7 @@ def chebvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 51a0f2fe0..f731817c0 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -1201,7 +1201,7 @@ def hermvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Hermite polynomial. The dtype will be the same as
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 9ae3c7067..3ecb3c58d 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -646,7 +646,7 @@ def hermeder(c, m=1, scl=1, axis=0) :
Parameters
----------
- c: array_like
+ c : array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
@@ -1198,7 +1198,7 @@ def hermevander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 874088b2c..a951567de 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -644,7 +644,7 @@ def lagder(c, m=1, scl=1, axis=0) :
Parameters
----------
- c: array_like
+ c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
@@ -1201,7 +1201,7 @@ def lagvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index ba49cbc57..45107f543 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -1238,7 +1238,7 @@ def legvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index b9a14972e..49678cbbc 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -485,7 +485,7 @@ def polyder(c, m=1, scl=1, axis=0):
Parameters
----------
- c: array_like
+ c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axis correspond to different variables with the degree
in each axis given by the corresponding index.
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 0d3343b04..51caacebe 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -138,7 +138,7 @@ def as_series(alist, trim=True) :
Raises
------
- ValueError :
+ ValueError
Raised when `as_series` cannot convert its input to 1-d arrays, or at
least one of the resulting arrays is empty.
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 2d4d904bd..c45b85b59 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1839,7 +1839,6 @@ cdef class RandomState:
Notes
-----
-
The F statistic is used to compare in-group variances to between-group
variances. Calculating the distribution depends on the sampling, and
so it is a function of the respective degrees of freedom in the
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 62046814f..979894fbc 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -1302,7 +1302,7 @@ def nulp_diff(x, y, dtype=None):
Returns
-------
- nulp: array_like
+ nulp : array_like
number of representable floating point numbers between each item in x
and y.