-rw-r--r--  doc/release/1.14.0-notes.rst                  8
-rw-r--r--  doc/source/conf.py                           13
-rw-r--r--  numpy/core/shape_base.py                     43
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src   15
-rw-r--r--  numpy/core/src/multiarray/number.c          262
-rw-r--r--  numpy/core/tests/test_multiarray.py          28
-rw-r--r--  numpy/core/tests/test_regression.py          53
-rw-r--r--  numpy/lib/shape_base.py                      26
-rw-r--r--  site.cfg.example                             47
9 files changed, 245 insertions, 250 deletions
diff --git a/doc/release/1.14.0-notes.rst b/doc/release/1.14.0-notes.rst
index 23cfbf93f..79c678602 100644
--- a/doc/release/1.14.0-notes.rst
+++ b/doc/release/1.14.0-notes.rst
@@ -35,6 +35,7 @@ from ``np.array([1, 2, 3])[np.True_]``. This behavior is deprecated.
empty, use ``array.size > 0``.
* Calling ``np.bincount`` with ``minlength=None`` is deprecated - instead,
``minlength=0`` should be used.
+
``np.fromstring`` should always be passed a ``sep`` argument
------------------------------------------------------------
Without this argument, this falls back on a broken version of `np.frombuffer`
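A quick sketch of the distinction this deprecation draws (illustrative, not part of the patch): text parsing needs an explicit ``sep``, while raw binary data belongs with ``np.frombuffer``.

    import numpy as np

    # text parsing: the supported use of np.fromstring, with an explicit sep
    np.fromstring('1 2 3', dtype=int, sep=' ')          # array([1, 2, 3])

    # raw binary data: use np.frombuffer rather than sep-less fromstring
    np.frombuffer(b'\x01\x00\x00\x00', dtype=np.int32)  # array([1], dtype=int32)
                                                        # (on a little-endian machine)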
@@ -67,6 +68,13 @@ equivalent to the second.
writeable. Currently it returns a non-writeable copy. See gh-7054 for a
discussion of the issue.
+unstructured void array's ``.item`` method will return a bytes object
+---------------------------------------------------------------------
+In the future, calling ``.item()`` on arrays or scalars of ``np.void`` datatype
+will return a ``bytes`` object instead of a buffer or int array, the same as
+returned by ``bytes(void_scalar)``. This may affect code which assumed the
+return value was mutable, which will no longer be the case. A ``FutureWarning``
+is now issued when this would occur.
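A minimal sketch of the future behaviour described above (illustrative, not part of the patch); ``bytes(void_scalar)`` already produces the immutable copy that ``.item()`` will eventually return:

    import numpy as np

    a = np.zeros(1, dtype='V4')   # unstructured void array
    scalar = a[0]                 # np.void scalar
    bytes(scalar)                 # b'\x00\x00\x00\x00' -- the future .item() result
    # scalar.item() still returns a mutable buffer in 1.14, with a FutureWarning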
Build System Changes
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 9ac729961..7c34a62cd 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -19,12 +19,18 @@ needs_sphinx = '1.0'
sys.path.insert(0, os.path.abspath('../sphinxext'))
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
+extensions = ['sphinx.ext.autodoc', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'matplotlib.sphinxext.plot_directive']
+if sphinx.version_info >= (1, 4):
+ extensions.append('sphinx.ext.imgmath')
+ imgmath_image_format = 'svg'
+else:
+ extensions.append('sphinx.ext.pngmath')
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -123,8 +129,9 @@ html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
-pngmath_use_preview = True
-pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
+if 'sphinx.ext.pngmath' in extensions:
+ pngmath_use_preview = True
+ pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
plot_html_show_formats = False
plot_html_show_source_link = False
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 8a047fdda..65c3ed00d 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -183,23 +183,25 @@ def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
- Take a sequence of arrays and stack them vertically to make a single
- array. Rebuild arrays divided by `vsplit`.
+ This is equivalent to concatenation along the first axis after 1-D arrays
+ of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
+ `vsplit`.
- This function continues to be supported for backward compatibility, but
- you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack``
- function was added in NumPy 1.10.
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
- Tuple containing arrays to be stacked. The arrays must have the same
- shape along all but the first axis.
+ The arrays must have the same shape along all but the first axis.
+ 1-D arrays must have the same length.
Returns
-------
stacked : ndarray
- The array formed by stacking the given arrays.
+        The array formed by stacking the given arrays, which will be at
+        least 2-D.
See Also
--------
@@ -210,11 +212,6 @@ def vstack(tup):
vsplit : Split array into a list of multiple sub-arrays vertically.
block : Assemble arrays from blocks.
- Notes
- -----
- Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that
- are at least 2-dimensional.
-
Examples
--------
>>> a = np.array([1, 2, 3])
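The rewritten docstring states an equivalence that can be checked directly (a sketch under the stated semantics, not part of the patch):

    import numpy as np

    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    # 1-D (N,) inputs are reshaped to (1, N), then concatenated along axis 0
    assert np.array_equal(np.vstack((a, b)),
                          np.concatenate((a.reshape(1, -1), b.reshape(1, -1)),
                                         axis=0))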
@@ -240,17 +237,20 @@ def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
- Take a sequence of arrays and stack them horizontally to make
- a single array. Rebuild arrays divided by `hsplit`.
+ This is equivalent to concatenation along the second axis, except for 1-D
+ arrays where it concatenates along the first axis. Rebuilds arrays divided
+ by `hsplit`.
- This function continues to be supported for backward compatibility, but
- you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack``
- function was added in NumPy 1.10.
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
- All arrays must have the same shape along all but the second axis.
+ The arrays must have the same shape along all but the second axis,
+        except 1-D arrays, which can be any length.
Returns
-------
@@ -266,11 +266,6 @@ def hstack(tup):
hsplit : Split array along second axis.
block : Assemble arrays from blocks.
- Notes
- -----
- Equivalent to ``np.concatenate(tup, axis=1)`` if `tup` contains arrays that
- are at least 2-dimensional.
-
Examples
--------
>>> a = np.array((1,2,3))
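Likewise for ``hstack`` (illustrative sketch): 1-D inputs concatenate along their only axis, higher-dimensional inputs along the second.

    import numpy as np

    a = np.array((1, 2, 3))
    b = np.array((4, 5, 6))
    assert np.array_equal(np.hstack((a, b)), np.concatenate((a, b), axis=0))

    m = np.array([[1], [2]])
    n = np.array([[3], [4]])
    assert np.array_equal(np.hstack((m, n)), np.concatenate((m, n), axis=1))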
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index e00df6762..d0370fe6b 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -730,6 +730,21 @@ VOID_getitem(void *input, void *vap)
return (PyObject *)ret;
}
+ /* 2017-11-26, 1.14 */
+ if (DEPRECATE_FUTUREWARNING(
+ "the `.item()` method of unstructured void types will return an "
+ "immutable `bytes` object in the near future, the same as "
+ "returned by `bytes(void_obj)`, instead of the mutable memoryview "
+ "or integer array returned in numpy 1.13.") < 0) {
+ return NULL;
+ }
+ /*
+ * In the future all the code below will be replaced by
+ *
+ * For unstructured void types like V4, return a bytes object (copy).
+ * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
+ */
+
if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
|| PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
PyErr_SetString(PyExc_ValueError,
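From Python, the new warning path in ``VOID_getitem`` can be exercised the same way the regression test later in this patch does (a sketch of the intended behaviour):

    import warnings
    import numpy as np

    va = np.zeros(10, 'V4')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        va[:1].item()   # unstructured void .item() now emits a FutureWarning
    assert any(issubclass(wi.category, FutureWarning) for wi in w)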
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index dbf71230a..915d743c8 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -16,6 +16,15 @@
#include "binop_override.h"
+/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
+#if (PY_VERSION_HEX < 0x02070B00) || \
+ ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
+ #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
+#else
+ #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
+#endif
+
+
/*************************************************************************
**************** Implement Number Protocol ****************************
*************************************************************************/
@@ -785,7 +794,7 @@ _array_nonzero(PyArrayObject *mp)
n = PyArray_SIZE(mp);
if (n == 1) {
int res;
- if (Py_EnterRecursiveCall(" while converting array to bool")) {
+ if (_Py_EnterRecursiveCall(" while converting array to bool")) {
return -1;
}
res = PyArray_DESCR(mp)->f->nonzero(PyArray_DATA(mp), mp);
@@ -814,213 +823,112 @@ _array_nonzero(PyArrayObject *mp)
}
}
-
+/*
+ * Convert the array to a scalar if allowed, and apply the builtin function
+ * to it. The where argument is passed on to Py_EnterRecursiveCall when the
+ * array contains python objects.
+ */
NPY_NO_EXPORT PyObject *
-array_int(PyArrayObject *v)
+array_scalar_forward(PyArrayObject *v,
+ PyObject *(*builtin_func)(PyObject *),
+ const char *where)
{
- PyObject *pv, *pv2;
+ PyObject *scalar;
if (PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be"\
+ PyErr_SetString(PyExc_TypeError, "only size-1 arrays can be"\
" converted to Python scalars");
return NULL;
}
- pv = PyArray_GETITEM(v, PyArray_DATA(v));
- if (pv == NULL) {
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number == 0) {
- PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
- "scalar object is not a number");
- Py_DECREF(pv);
+
+ scalar = PyArray_GETITEM(v, PyArray_DATA(v));
+ if (scalar == NULL) {
return NULL;
}
- if (Py_TYPE(pv)->tp_as_number->nb_int == 0) {
- PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
- "scalar number to int");
- Py_DECREF(pv);
- return NULL;
+
+ /* Need to guard against recursion if our array holds references */
+ if (PyDataType_REFCHK(PyArray_DESCR(v))) {
+ PyObject *res;
+ if (_Py_EnterRecursiveCall(where) != 0) {
+ Py_DECREF(scalar);
+ return NULL;
+ }
+ res = builtin_func(scalar);
+ Py_DECREF(scalar);
+ Py_LeaveRecursiveCall();
+ return res;
}
- /*
- * If we still got an array which can hold references, stop
- * because it could point back at 'v'.
- */
- if (PyArray_Check(pv) &&
- PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) {
- PyErr_SetString(PyExc_TypeError,
- "object array may be self-referencing");
- Py_DECREF(pv);
- return NULL;
+ else {
+ PyObject *res;
+ res = builtin_func(scalar);
+ Py_DECREF(scalar);
+ return res;
}
-
- pv2 = Py_TYPE(pv)->tp_as_number->nb_int(pv);
- Py_DECREF(pv);
- return pv2;
}
-static PyObject *
+
+NPY_NO_EXPORT PyObject *
array_float(PyArrayObject *v)
{
- PyObject *pv, *pv2;
- if (PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
- "be converted to Python scalars");
- return NULL;
- }
- pv = PyArray_GETITEM(v, PyArray_DATA(v));
- if (pv == NULL) {
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number == 0) {
- PyErr_SetString(PyExc_TypeError, "cannot convert to a "\
- "float; scalar object is not a number");
- Py_DECREF(pv);
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number->nb_float == 0) {
- PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
- "scalar number to float");
- Py_DECREF(pv);
- return NULL;
- }
- /*
- * If we still got an array which can hold references, stop
- * because it could point back at 'v'.
- */
- if (PyArray_Check(pv) &&
- PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) {
- PyErr_SetString(PyExc_TypeError,
- "object array may be self-referencing");
- Py_DECREF(pv);
- return NULL;
- }
- pv2 = Py_TYPE(pv)->tp_as_number->nb_float(pv);
- Py_DECREF(pv);
- return pv2;
+ return array_scalar_forward(v, &PyNumber_Float, " in ndarray.__float__");
}
-#if !defined(NPY_PY3K)
+#if defined(NPY_PY3K)
-static PyObject *
+NPY_NO_EXPORT PyObject *
+array_int(PyArrayObject *v)
+{
+ return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__int__");
+}
+
+#else
+
+NPY_NO_EXPORT PyObject *
+array_int(PyArrayObject *v)
+{
+ return array_scalar_forward(v, &PyNumber_Int, " in ndarray.__int__");
+}
+
+NPY_NO_EXPORT PyObject *
array_long(PyArrayObject *v)
{
- PyObject *pv, *pv2;
- if (PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
- "be converted to Python scalars");
- return NULL;
- }
- pv = PyArray_GETITEM(v, PyArray_DATA(v));
- if (pv == NULL) {
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number == 0) {
- PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
- "scalar object is not a number");
- Py_DECREF(pv);
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number->nb_long == 0) {
- PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
- "scalar number to long");
- Py_DECREF(pv);
+ return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__long__");
+}
+
+/* hex and oct aren't exposed to the C API, but we need a function pointer */
+static PyObject *
+_PyNumber_Oct(PyObject *o) {
+ PyObject *res;
+ PyObject *mod = PyImport_ImportModule("__builtin__");
+ if (mod == NULL) {
return NULL;
}
- /*
- * If we still got an array which can hold references, stop
- * because it could point back at 'v'.
- */
- if (PyArray_Check(pv) &&
- PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) {
- PyErr_SetString(PyExc_TypeError,
- "object array may be self-referencing");
- Py_DECREF(pv);
+ res = PyObject_CallMethod(mod, "oct", "(O)", o);
+ Py_DECREF(mod);
+ return res;
+}
+
+static PyObject *
+_PyNumber_Hex(PyObject *o) {
+ PyObject *res;
+ PyObject *mod = PyImport_ImportModule("__builtin__");
+ if (mod == NULL) {
return NULL;
}
- pv2 = Py_TYPE(pv)->tp_as_number->nb_long(pv);
- Py_DECREF(pv);
- return pv2;
+ res = PyObject_CallMethod(mod, "hex", "(O)", o);
+ Py_DECREF(mod);
+ return res;
}
-static PyObject *
+NPY_NO_EXPORT PyObject *
array_oct(PyArrayObject *v)
{
- PyObject *pv, *pv2;
- if (PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
- "be converted to Python scalars");
- return NULL;
- }
- pv = PyArray_GETITEM(v, PyArray_DATA(v));
- if (pv == NULL) {
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number == 0) {
- PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
- "scalar object is not a number");
- Py_DECREF(pv);
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number->nb_oct == 0) {
- PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
- "scalar number to oct");
- Py_DECREF(pv);
- return NULL;
- }
- /*
- * If we still got an array which can hold references, stop
- * because it could point back at 'v'.
- */
- if (PyArray_Check(pv) &&
- PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) {
- PyErr_SetString(PyExc_TypeError,
- "object array may be self-referencing");
- Py_DECREF(pv);
- return NULL;
- }
- pv2 = Py_TYPE(pv)->tp_as_number->nb_oct(pv);
- Py_DECREF(pv);
- return pv2;
+ return array_scalar_forward(v, &_PyNumber_Oct, " in ndarray.__oct__");
}
-static PyObject *
+NPY_NO_EXPORT PyObject *
array_hex(PyArrayObject *v)
{
- PyObject *pv, *pv2;
- if (PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
- "be converted to Python scalars");
- return NULL;
- }
- pv = PyArray_GETITEM(v, PyArray_DATA(v));
- if (pv == NULL) {
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number == 0) {
- PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
- "scalar object is not a number");
- Py_DECREF(pv);
- return NULL;
- }
- if (Py_TYPE(pv)->tp_as_number->nb_hex == 0) {
- PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
- "scalar number to hex");
- Py_DECREF(pv);
- return NULL;
- }
- /*
- * If we still got an array which can hold references, stop
- * because it could point back at 'v'.
- */
- if (PyArray_Check(pv) &&
- PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) {
- PyErr_SetString(PyExc_TypeError,
- "object array may be self-referencing");
- Py_DECREF(pv);
- return NULL;
- }
- pv2 = Py_TYPE(pv)->tp_as_number->nb_hex(pv);
- Py_DECREF(pv);
- return pv2;
+ return array_scalar_forward(v, &_PyNumber_Hex, " in ndarray.__hex__");
}
#endif
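The user-visible effect of consolidating these paths into ``array_scalar_forward`` is easiest to see from Python (a sketch based on the tests added in this patch):

    import numpy as np

    # forwarding to the builtin lets string-like scalars convert (gh-9972)
    assert int(np.array('4')) == 4

    # a self-referencing object array now exhausts the recursion limit
    # instead of being rejected up front with a TypeError
    a = np.array(0, dtype=object)
    a[()] = a
    try:
        int(a)
    except RuntimeError:   # RecursionError subclasses RuntimeError on python >= 3.5
        pass
    a[()] = None           # break the cycle so the object can be collected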
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 90cc473bc..a625a1bce 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -6711,6 +6711,34 @@ class TestConversion(object):
Error = RuntimeError # python < 3.5
assert_raises(Error, bool, self_containing) # previously stack overflow
+ def test_to_int_scalar(self):
+ # gh-9972 means that these aren't always the same
+ int_funcs = (int, lambda x: x.__int__())
+ for int_func in int_funcs:
+ assert_equal(int_func(np.array([1])), 1)
+ assert_equal(int_func(np.array([0])), 0)
+ assert_equal(int_func(np.array([[42]])), 42)
+ assert_raises(TypeError, int_func, np.array([1, 2]))
+
+ # gh-9972
+ assert_equal(4, int_func(np.array('4')))
+ assert_equal(5, int_func(np.bytes_(b'5')))
+ assert_equal(6, int_func(np.unicode_(u'6')))
+
+ class HasTrunc:
+ def __trunc__(self):
+ return 3
+ assert_equal(3, int_func(np.array(HasTrunc())))
+ assert_equal(3, int_func(np.array([HasTrunc()])))
+
+ class NotConvertible(object):
+ def __int__(self):
+ raise NotImplementedError
+ assert_raises(NotImplementedError,
+ int_func, np.array(NotConvertible()))
+ assert_raises(NotImplementedError,
+ int_func, np.array([NotConvertible()]))
+
class TestWhere(object):
def test_basic(self):
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index fca3312b9..a3b011454 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -20,6 +20,10 @@ from numpy.testing import (
)
from numpy.compat import asbytes, asunicode, long
+try:
+ RecursionError
+except NameError:
+ RecursionError = RuntimeError # python < 3.5
class TestRegression(object):
def test_invalid_round(self):
@@ -1683,25 +1687,47 @@ class TestRegression(object):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
- assert_raises(TypeError, int, a)
- assert_raises(TypeError, long, a)
- assert_raises(TypeError, float, a)
- assert_raises(TypeError, oct, a)
- assert_raises(TypeError, hex, a)
-
+ assert_raises(RecursionError, int, a)
+ assert_raises(RecursionError, long, a)
+ assert_raises(RecursionError, float, a)
+ if sys.version_info.major == 2:
+            # in python 3, this falls back on operator.index, which fails
+            # on dtype=object
+ assert_raises(RecursionError, oct, a)
+ assert_raises(RecursionError, hex, a)
+ a[()] = None
+
+ def test_object_array_circular_reference(self):
# Test the same for a circular reference.
- b = np.array(a, dtype=object)
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
a[()] = b
- assert_raises(TypeError, int, a)
+ b[()] = a
+ assert_raises(RecursionError, int, a)
# NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
- a[()] = 0
+ a[()] = None
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
+ def test_object_array_nested(self):
+        # int(), float() etc. are fine with a reference to a different array
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
+ a[()] = b
+ assert_equal(int(a), int(0))
+ assert_equal(long(a), long(0))
+ assert_equal(float(a), float(0))
+ if sys.version_info.major == 2:
+            # in python 3, this falls back on operator.index, which fails
+            # on dtype=object
+ assert_equal(oct(a), oct(0))
+ assert_equal(hex(a), hex(0))
+
+
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
@@ -2234,6 +2260,15 @@ class TestRegression(object):
item2 = copy.copy(item)
assert_equal(item, item2)
+ def test_void_item_memview(self):
+ va = np.zeros(10, 'V4')
+        # for now, there is just a FutureWarning
+        assert_warns(FutureWarning, va[:1].item)
+        # in the future, test that we got a bytes copy:
+ #x = va[:1].item()
+ #va[0] = b'\xff\xff\xff\xff'
+ #del va
+ #assert_equal(x, b'\x00\x00\x00\x00')
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index a8977bd4c..83e39f9f5 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -354,25 +354,26 @@ def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
- Takes a sequence of arrays and stack them along the third axis
- to make a single array. Rebuilds arrays divided by `dsplit`.
- This is a simple way to stack 2D arrays (images) into a single
- 3D array for processing.
+ This is equivalent to concatenation along the third axis after 2-D arrays
+ of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
+ `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
+ `dsplit`.
- This function continues to be supported for backward compatibility, but
- you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack``
- function was added in NumPy 1.10.
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
- Arrays to stack. All of them must have the same shape along all
- but the third axis.
+ The arrays must have the same shape along all but the third axis.
+ 1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
- The array formed by stacking the given arrays.
+        The array formed by stacking the given arrays, which will be at
+        least 3-D.
See Also
--------
@@ -382,11 +383,6 @@ def dstack(tup):
concatenate : Join a sequence of arrays along an existing axis.
dsplit : Split array along third axis.
- Notes
- -----
- Equivalent to ``np.concatenate(tup, axis=2)`` if `tup` contains arrays that
- are at least 3-dimensional.
-
Examples
--------
>>> a = np.array((1,2,3))
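The same kind of equivalence holds for ``dstack`` (illustrative sketch): 1-D ``(N,)`` inputs become ``(1, N, 1)`` before concatenating along the third axis.

    import numpy as np

    a = np.array((1, 2, 3))
    b = np.array((4, 5, 6))
    assert np.dstack((a, b)).shape == (1, 3, 2)
    assert np.array_equal(np.dstack((a, b)),
                          np.concatenate((a.reshape(1, -1, 1),
                                          b.reshape(1, -1, 1)), axis=2))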
diff --git a/site.cfg.example b/site.cfg.example
index 8e043cfb3..645b48543 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -153,29 +153,32 @@
# runtime_library_dirs = /home/username/blis/lib
# MKL
-#----
-# MKL is Intel's very optimized yet proprietary implementation of BLAS and
-# Lapack.
-# For recent (9.0.21, for example) mkl, you need to change the names of the
-# lapack library. Assuming you installed the mkl in /opt, for a 32 bits cpu:
+#----
+# Intel MKL is Intel's highly optimized yet proprietary implementation of BLAS
+# and LAPACK. Find the latest info on building numpy with Intel MKL in this
+# article: https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl
+# Assuming you installed MKL in /opt/intel/compilers_and_libraries_2018/linux/mkl,
+# for 64-bit code on Linux:
+# [mkl]
+# library_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/lib/intel64
+# include_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/include
+# mkl_libs = mkl_rt
+# lapack_libs = 
+#
+# For 32-bit code on Linux:
# [mkl]
-# library_dirs = /opt/intel/mkl/9.1.023/lib/32/
-# lapack_libs = mkl_lapack
-#
-# For 10.*, on 32 bits machines:
-# [mkl]
-# library_dirs = /opt/intel/mkl/10.0.1.014/lib/32/
-# lapack_libs = mkl_lapack
-# mkl_libs = mkl, guide
-#
-# On win-64, the following options compiles numpy with the MKL library
-# dynamically linked.
-# [mkl]
-# include_dirs = C:\Program Files (x86)\Intel\Composer XE 2015\mkl\include
-# library_dirs = C:\Program Files (x86)\Intel\Composer XE 2015\mkl\lib\intel64
-# mkl_libs = mkl_core_dll, mkl_intel_lp64_dll, mkl_intel_thread_dll
-# lapack_libs = mkl_lapack95_lp64
-
+# library_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/lib/ia32
+# include_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/include
+# mkl_libs = mkl_rt
+# lapack_libs = 
+#
+# On win-64, the following options compile numpy with the MKL library
+# dynamically linked.
+# [mkl]
+# include_dirs = C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\mkl\include
+# library_dirs = C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\mkl\lib\intel64
+# mkl_libs = mkl_rt
+# lapack_libs =
# UMFPACK
# -------
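After rebuilding numpy against MKL with an ``[mkl]`` section like the ones above, one quick sanity check (a sketch; the exact output format varies across numpy versions):

    import numpy as np

    np.show_config()   # a successful MKL build reports mkl_rt in the BLAS/LAPACK sections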