summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bento.info1
-rw-r--r--bscript54
-rw-r--r--doc/release/2.0.0-notes.rst67
-rw-r--r--doc/sphinxext/LICENSE.txt7
-rw-r--r--doc/sphinxext/setup.py14
-rw-r--r--numpy/__config__.py.in2
-rw-r--r--numpy/core/code_generators/cversions.txt4
-rw-r--r--numpy/core/code_generators/numpy_api.py2
-rw-r--r--numpy/core/fromnumeric.py22
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h39
-rw-r--r--numpy/core/numeric.py5
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c5
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c5
-rw-r--r--numpy/core/src/multiarray/arrayobject.c108
-rw-r--r--numpy/core/src/multiarray/arrayobject.h14
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src4
-rw-r--r--numpy/core/src/multiarray/buffer.c28
-rw-r--r--numpy/core/src/multiarray/ctors.c48
-rw-r--r--numpy/core/src/multiarray/datetime.c4
-rw-r--r--numpy/core/src/multiarray/descriptor.c28
-rw-r--r--numpy/core/src/multiarray/getset.c16
-rw-r--r--numpy/core/src/multiarray/item_selection.c19
-rw-r--r--numpy/core/src/multiarray/iterators.c32
-rw-r--r--numpy/core/src/multiarray/mapping.c8
-rw-r--r--numpy/core/src/multiarray/methods.c14
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c20
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src1
-rw-r--r--numpy/core/src/multiarray/sequence.c4
-rw-r--r--numpy/core/src/npymath/ieee754.c.src13
-rw-r--r--numpy/core/src/umath/ufunc_object.c11
-rw-r--r--numpy/core/tests/test_datetime.py8
-rw-r--r--numpy/core/tests/test_maskna.py6
-rw-r--r--numpy/core/tests/test_multiarray.py192
-rw-r--r--numpy/core/tests/test_nditer.py1
-rw-r--r--numpy/lib/function_base.py322
-rw-r--r--numpy/lib/index_tricks.py40
-rw-r--r--numpy/lib/tests/test_function_base.py155
-rw-r--r--numpy/lib/tests/test_index_tricks.py38
-rw-r--r--numpy/lib/twodim_base.py15
-rw-r--r--numpy/ma/core.py6
-rw-r--r--numpy/ma/tests/test_core.py10
-rw-r--r--numpy/numarray/_capi.c22
-rw-r--r--numpy/version.py.in11
43 files changed, 1018 insertions, 407 deletions
diff --git a/bento.info b/bento.info
index d4c7d3eee..f53b0b4d0 100644
--- a/bento.info
+++ b/bento.info
@@ -76,6 +76,7 @@ DataFiles: numpy-includes
numpy/core/include/numpy/fenv/*.h
HookFile: bscript
+MetaTemplateFiles: numpy/version.py.in, numpy/__config__.py.in
Recurse: numpy
UseBackends: Waf
diff --git a/bscript b/bscript
index 9517bcbd1..88f434d7f 100644
--- a/bscript
+++ b/bscript
@@ -22,7 +22,9 @@ import __builtin__
__builtin__.__NUMPY_SETUP__ = True
from bento.commands import hooks
-from bento.backends import waf_backend
+from bento.utils.utils \
+ import \
+ cmd_is_runnable
import waflib
@@ -88,32 +90,25 @@ def check_blas_lapack(conf):
#conf.env.HAS_LAPACK = True
#conf.env.LIB_LAPACK = ["lapack", "f77blas", "cblas", "atlas"]
-def set_revision(template, version):
- try:
- proc = subprocess.Popen('git rev-parse --short HEAD',
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
- git_revision, _ = proc.communicate()
- git_revision = git_revision.strip()
- except Exception:
- git_revision = "Unknown"
-
- full_version = version
- template_str = template.read()
+def compute_git_revision(top_node):
+ git_repo_node = top_node.find_node(".git")
+ if git_repo_node and cmd_is_runnable(["git", "--version"]):
+ s = subprocess.Popen(["git", "rev-parse", "HEAD"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=top_node.abspath())
+ out = s.communicate()[0]
+ return out.decode().strip()
+ else:
+ return ""
+def _register_metadata(context):
+ git_revision = compute_git_revision(context.top_node)
+ full_version = context.pkg.version
if not _SETUP_PY.ISRELEASED:
full_version += '.dev-' + git_revision[:7]
- content = string.Template(template_str).substitute(version=version,
- full_version=full_version, git_revision=git_revision,
- is_released=_SETUP_PY.ISRELEASED)
- output = template.change_ext("")
- output.safe_write(content)
- return output
-def make_git_commit_info(ctx):
- commit_template = ctx.make_source_node(op.join("numpy", "version.py.in"))
- return set_revision(commit_template, ctx.pkg.version)
+ context.register_metadata("git_revision", git_revision)
+ context.register_metadata("is_released", _SETUP_PY.ISRELEASED)
+ context.register_metadata("full_version", full_version)
@hooks.post_configure
def post_configure(context):
@@ -124,11 +119,8 @@ def post_configure(context):
@hooks.pre_build
def pre_build(context):
- commit_output = make_git_commit_info(context)
- context.register_outputs_simple([commit_output])
-
- # FIXME: we write a dummy show for now - the original show function is not
- # super useful anyway.
- config_node = context.make_build_node("numpy/__config__.py")
- config_node.safe_write("def show(): pass")
- context.register_outputs_simple([config_node])
+ _register_metadata(context)
+
+@hooks.pre_sdist
+def pre_sdist(context):
+ _register_metadata(context)
diff --git a/doc/release/2.0.0-notes.rst b/doc/release/2.0.0-notes.rst
index 3b61afc31..7a9d43fa1 100644
--- a/doc/release/2.0.0-notes.rst
+++ b/doc/release/2.0.0-notes.rst
@@ -8,6 +8,41 @@ Highlights
==========
+Compatibility notes
+===================
+
+In a future version of numpy, the functions np.diag, np.diagonal, and
+the diagonal method of ndarrays will return a view onto the original
+array, instead of producing a copy as they do now. This makes a
+difference if you write to the array returned by any of these
+functions. To facilitate this transition, numpy 1.7 produces a
+DeprecationWarning if it detects that you may be attempting to write
+to such an array. See the documentation for np.diagonal for details.
+
+The default casting rule for UFunc out= parameters has been changed from
+'unsafe' to 'same_kind'. Most usages which violate the 'same_kind'
+rule are likely bugs, so this change may expose previously undetected
+errors in projects that depend on NumPy.
+
+Full-array boolean indexing used to allow boolean arrays with a size
+non-broadcastable to the array size. Now it forces this to be broadcastable.
+Since this affects some legacy code, this change will require discussion
+during alpha or early beta testing, and a decision to either keep the
+stricter behavior, or add in a hack to allow the previous behavior to
+work.
+
+Attempting to write to a read-only array (one with
+``arr.flags.writeable`` set to ``False``) used to raise either a
+RuntimeError, ValueError, or TypeError inconsistently, depending on
+which code path was taken. It now consistently raises a ValueError.
+
+The <ufunc>.reduce functions evaluate some reductions in a different
+order than in previous versions of NumPy, generally providing higher
+performance. Because of the nature of floating-point arithmetic, this
+may subtly change some results, just as linking NumPy to a different
+BLAS implementation such as MKL can.
+
+
New features
============
@@ -148,36 +183,20 @@ New argument to searchsorted
The function searchsorted now accepts a 'sorter' argument that is a
permuation array that sorts the array to search.
+C API
+-----
+
+New function ``PyArray_FailUnlessWriteable`` provides a consistent
+interface for checking array writeability -- any C code which works
+with arrays whose WRITEABLE flag is not known to be True a priori,
+should make sure to call this function before writing.
+
Changes
=======
General
-------
-The default casting rule for UFunc out= parameters has been changed from
-'unsafe' to 'same_kind'. Most usages which violate the 'same_kind'
-rule are likely bugs, so this change may expose previously undetected
-errors in projects that depend on NumPy.
-
-Full-array boolean indexing used to allow boolean arrays with a size
-non-broadcastable to the array size. Now it forces this to be broadcastable.
-Since this affects some legacy code, this change will require discussion
-during alpha or early beta testing, and a decision to either keep the
-stricter behavior, or add in a hack to allow the previous behavior to
-work.
-
-The functions np.diag, np.diagonal, and <ndarray>.diagonal now return a
-view into the original array instead of making a copy. This makes these
-functions more consistent with NumPy's general approach of taking views
-where possible, and performs much faster as well. This has the
-potential to break code that assumes a copy is made instead of a view.
-
-The <ufunc>.reduce functions evaluates some reductions in a different
-order than in previous versions of NumPy, generally providing higher
-performance. Because of the nature of floating-point arithmetic, this
-may subtly change some results, just as linking NumPy to a different
-BLAS implementations such as MKL can.
-
The function np.concatenate tries to match the layout of its input
arrays. Previously, the layout did not follow any particular reason,
and depended in an undesirable way on the particular axis chosen for
diff --git a/doc/sphinxext/LICENSE.txt b/doc/sphinxext/LICENSE.txt
index e00efc31e..b15c699dc 100644
--- a/doc/sphinxext/LICENSE.txt
+++ b/doc/sphinxext/LICENSE.txt
@@ -1,8 +1,6 @@
-------------------------------------------------------------------------------
The files
- numpydoc.py
- - autosummary.py
- - autosummary_generate.py
- docscrape.py
- docscrape_sphinx.py
- phantom_import.py
@@ -71,10 +69,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------
- The files
- - only_directives.py
+ The file
- plot_directive.py
- originate from Matplotlib (http://matplotlib.sf.net/) which has
+ originates from Matplotlib (http://matplotlib.sf.net/) which has
the following license:
Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
diff --git a/doc/sphinxext/setup.py b/doc/sphinxext/setup.py
index 76e3fd81b..ec6aa3840 100644
--- a/doc/sphinxext/setup.py
+++ b/doc/sphinxext/setup.py
@@ -1,13 +1,11 @@
from distutils.core import setup
-import setuptools
-import sys, os
version = "0.4"
setup(
name="numpydoc",
packages=["numpydoc"],
- package_dir={"numpydoc": ""},
+ package_dir={"numpydoc": "."},
version=version,
description="Sphinx extension to support docstrings in Numpy format",
# classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
@@ -20,12 +18,6 @@ setup(
author_email="pav@iki.fi",
url="http://github.com/numpy/numpy/tree/master/doc/sphinxext",
license="BSD",
- zip_safe=False,
- install_requires=["Sphinx >= 1.0.1"],
- package_data={'numpydoc': 'tests', '': ''},
- entry_points={
- "console_scripts": [
- "autosummary_generate = numpydoc.autosummary_generate:main",
- ],
- },
+ requires=["sphinx (>= 1.0.1)"],
+ package_data={'numpydoc': ['tests/test_*.py']},
)
diff --git a/numpy/__config__.py.in b/numpy/__config__.py.in
new file mode 100644
index 000000000..3190d21b4
--- /dev/null
+++ b/numpy/__config__.py.in
@@ -0,0 +1,2 @@
+def show():
+ pass
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 6330d4ae6..3599f47c7 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -9,5 +9,5 @@
# Version 6 (NumPy 1.6) added new iterator, half float and casting functions,
# PyArray_CountNonzero, PyArray_NewLikeArray and PyArray_MatrixProduct2.
0x00000006 = e61d5dc51fa1c6459328266e215d6987
-# Version 7 (NumPy 1.7) added API for NA, improved datetime64.
-0x00000007 = eb54c77ff4149bab310324cd7c0cb176
+# Version 7 (NumPy 1.7) added API for NA, improved datetime64, misc utilities.
+0x00000007 = 280023b3ecfc2ad0326874917f6f16f9
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index ca89c28ec..15b868e23 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -344,6 +344,8 @@ multiarray_funcs_api = {
'NpyNA_FromDTypeAndPayload': 304,
'PyArray_AllowNAConverter': 305,
'PyArray_OutputAllowNAConverter': 306,
+ 'PyArray_FailUnlessWriteable': 307,
+ 'PyArray_SetUpdateIfCopyBase': 308,
}
ufunc_types_api = {
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index dae109a98..2b108bcf9 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -932,7 +932,27 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
- As of NumPy 1.7, this function always returns a view into `a`.
+ In versions of NumPy prior to 1.7, this function always returned a new,
+ independent array containing a copy of the values in the diagonal.
+
+ In NumPy 1.7, it continues to return a copy of the diagonal, but depending
+ on this fact is deprecated. Writing to the resulting array continues to
+ work as it used to, but a DeprecationWarning will be issued.
+
+ In NumPy 1.8, it will switch to returning a read-only view on the original
+ array. Attempting to write to the resulting array will produce an error.
+
+ In NumPy 1.9, it will still return a view, but this view will no longer be
+ marked read-only. Writing to the returned array will alter your original
+ array as well.
+
+ If you don't write to the array returned by this function, then you can
+ just ignore all of the above.
+
+ If you depend on the current behavior, then we suggest copying the
+ returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead of
+ just ``np.diagonal(a)``. This will work with both past and future versions
+ of NumPy.
Parameters
----------
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index db5257761..d77c5c90f 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -229,22 +229,27 @@ typedef enum {
#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1)
typedef enum {
- NPY_FR_Y, /* Years */
- NPY_FR_M, /* Months */
- NPY_FR_W, /* Weeks */
- NPY_FR_D, /* Days */
- NPY_FR_h, /* hours */
- NPY_FR_m, /* minutes */
- NPY_FR_s, /* seconds */
- NPY_FR_ms,/* milliseconds */
- NPY_FR_us,/* microseconds */
- NPY_FR_ns,/* nanoseconds */
- NPY_FR_ps,/* picoseconds */
- NPY_FR_fs,/* femtoseconds */
- NPY_FR_as,/* attoseconds */
- NPY_FR_GENERIC /* Generic, unbound units, can convert to anything */
+ NPY_FR_Y = 0, /* Years */
+ NPY_FR_M = 1, /* Months */
+ NPY_FR_W = 2, /* Weeks */
+ /* Gap where 1.6 NPY_FR_B (value 3) was */
+ NPY_FR_D = 4, /* Days */
+ NPY_FR_h = 5, /* hours */
+ NPY_FR_m = 6, /* minutes */
+ NPY_FR_s = 7, /* seconds */
+ NPY_FR_ms = 8, /* milliseconds */
+ NPY_FR_us = 9, /* microseconds */
+ NPY_FR_ns = 10,/* nanoseconds */
+ NPY_FR_ps = 11,/* picoseconds */
+ NPY_FR_fs = 12,/* femtoseconds */
+ NPY_FR_as = 13,/* attoseconds */
+ NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */
} NPY_DATETIMEUNIT;
+/*
+ * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
+ * is technically one more than the actual number of units.
+ */
#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
@@ -917,6 +922,10 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
*/
#define NPY_ARRAY_ALLOWNA 0x8000
+/*
+ * NOTE: there are also internal flags defined in multiarray/arrayobject.h,
+ * which start at bit 31 and work down.
+ */
#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \
NPY_ARRAY_WRITEABLE)
@@ -1550,7 +1559,7 @@ static NPY_INLINE PyObject *
PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
{
return ((PyArrayObject_fields *)arr)->descr->f->getitem(
- (void *)itemptr, (PyArrayObject *)arr);
+ (void *)itemptr, (PyArrayObject *)arr);
}
static NPY_INLINE int
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 9a51229a1..aa7d2c29b 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1956,9 +1956,8 @@ def identity(n, dtype=None, maskna=False):
[ 0., 0., 1.]])
"""
- a = zeros((n,n), dtype=dtype, maskna=maskna)
- a.diagonal()[...] = 1
- return a
+ from numpy import eye
+ return eye(n, dtype=dtype, maskna=maskna)
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
"""
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 03231a995..a9f3a35fe 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -449,10 +449,7 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src,
return 0;
}
- /* Check that 'dst' is writeable */
- if (!PyArray_ISWRITEABLE(dst)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot assign to a read-only array");
+ if (PyArray_FailUnlessWriteable(dst, "assignment destination") < 0) {
goto fail;
}
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index bf0676c96..60658ed4d 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -336,10 +336,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
int allocated_src_data = 0, dst_has_maskna = PyArray_HASMASKNA(dst);
npy_longlong scalarbuffer[4];
- /* Check that 'dst' is writeable */
- if (!PyArray_ISWRITEABLE(dst)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot assign a scalar value to a read-only array");
+ if (PyArray_FailUnlessWriteable(dst, "assignment destination") < 0) {
return -1;
}
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 11b04989f..4f0181b4a 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -67,6 +67,57 @@ PyArray_Size(PyObject *op)
}
/*NUMPY_API
+ *
+ * Precondition: 'arr' is a copy of 'base' (though possibly with different
+ * strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the
+ * ->base pointer on 'arr', so that when 'arr' is destructed, it will copy any
+ * changes back to 'base'.
+ *
+ * Steals a reference to 'base'.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+NPY_NO_EXPORT int
+PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
+{
+ if (base == NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot UPDATEIFCOPY to NULL array");
+ return -1;
+ }
+ if (PyArray_BASE(arr) != NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot set array with existing base to UPDATEIFCOPY");
+ goto fail;
+ }
+ if (PyArray_FailUnlessWriteable(base, "UPDATEIFCOPY base") < 0) {
+ goto fail;
+ }
+
+ /*
+ * Any writes to 'arr' will magically turn into writes to 'base', so we
+ * should warn if necessary.
+ */
+ if (PyArray_FLAGS(base) & NPY_ARRAY_WARN_ON_WRITE) {
+ PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WARN_ON_WRITE);
+ }
+
+ /*
+ * Unlike PyArray_SetBaseObject, we do not compress the chain of base
+ * references.
+ */
+ ((PyArrayObject_fields *)arr)->base = (PyObject *)base;
+ PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
+ PyArray_CLEARFLAGS(base, NPY_ARRAY_WRITEABLE);
+
+ return 0;
+
+ fail:
+ Py_DECREF(base);
+ return -1;
+}
+
+/*NUMPY_API
* Sets the 'base' attribute of the array. This steals a reference
* to 'obj'.
*
@@ -104,6 +155,11 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
PyArrayObject *obj_arr = (PyArrayObject *)obj;
PyObject *tmp;
+ /* Propagate WARN_ON_WRITE through views. */
+ if (PyArray_FLAGS(obj_arr) & NPY_ARRAY_WARN_ON_WRITE) {
+ PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WARN_ON_WRITE);
+ }
+
/* If this array owns its own data, stop collapsing */
if (PyArray_CHKFLAGS(obj_arr, NPY_ARRAY_OWNDATA)) {
break;
@@ -704,6 +760,58 @@ PyArray_CompareString(char *s1, char *s2, size_t len)
}
+/* Call this from contexts where an array might be written to, but we have no
+ * way to tell. (E.g., when converting to a read-write buffer.)
+ */
+NPY_NO_EXPORT int
+array_might_be_written(PyArrayObject *obj)
+{
+ const char *msg =
+ "Numpy has detected that you (may be) writing to an array returned\n"
+ "by numpy.diagonal. This code will likely break in the next numpy\n"
+ "release -- see numpy.diagonal docs for details. The quick fix is\n"
+ "to make an explicit copy (e.g., do arr.diagonal().copy()).";
+ if (PyArray_FLAGS(obj) & NPY_ARRAY_WARN_ON_WRITE) {
+ if (DEPRECATE(msg) < 0) {
+ return -1;
+ }
+ /* Only warn once per array */
+ while (1) {
+ PyArray_CLEARFLAGS(obj, NPY_ARRAY_WARN_ON_WRITE);
+ if (!PyArray_BASE(obj) || !PyArray_Check(PyArray_BASE(obj))) {
+ break;
+ }
+ obj = (PyArrayObject *)PyArray_BASE(obj);
+ }
+ }
+ return 0;
+}
+
+/*NUMPY_API
+ *
+ * This function does nothing if obj is writeable, and raises an exception
+ * (and returns -1) if obj is not writeable. It may also do other
+ * house-keeping, such as issuing warnings on arrays which are transitioning
+ * to become views. Always call this function at some point before writing to
+ * an array.
+ *
+ * 'name' is a name for the array, used to give better error
+ * messages. Something like "assignment destination", "output array", or even
+ * just "array".
+ */
+NPY_NO_EXPORT int
+PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
+{
+ if (!PyArray_ISWRITEABLE(obj)) {
+ PyErr_Format(PyExc_ValueError, "%s is read-only", name);
+ return -1;
+ }
+ if (array_might_be_written(obj) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
/* This also handles possibly mis-aligned data */
/* Compare s1 and s2 which are not necessarily NULL-terminated.
s1 is of length len1
diff --git a/numpy/core/src/multiarray/arrayobject.h b/numpy/core/src/multiarray/arrayobject.h
index ec3361435..9b74944ff 100644
--- a/numpy/core/src/multiarray/arrayobject.h
+++ b/numpy/core/src/multiarray/arrayobject.h
@@ -12,4 +12,18 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
NPY_NO_EXPORT PyObject *
array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op);
+NPY_NO_EXPORT int
+array_might_be_written(PyArrayObject *obj);
+
+/*
+ * This flag is used to mark arrays which we would like to, in the future,
+ * turn into views. It causes a warning to be issued on the first attempt to
+ * write to the array (but the write is allowed to succeed).
+ *
+ * This flag is for internal use only, and may be removed in a future release,
+ * which is why the #define is not exposed to user code. Currently it is set
+ * on arrays returned by ndarray.diagonal.
+ */
+static const int NPY_ARRAY_WARN_ON_WRITE = (1 << 31);
+
#endif
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index d1036a7cd..1846d8114 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -18,6 +18,7 @@
#include "usertypes.h"
#include "_datetime.h"
#include "na_object.h"
+#include "arrayobject.h"
#include "numpyos.h"
@@ -649,6 +650,9 @@ VOID_getitem(char *ip, PyArrayObject *ap)
* current item a view of it
*/
if (PyArray_ISWRITEABLE(ap)) {
+ if (array_might_be_written(ap) < 0) {
+ return NULL;
+ }
u = (PyArrayObject *)PyBuffer_FromReadWriteMemory(ip, itemsize);
}
else {
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index ac94d270e..a1ad23911 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -13,6 +13,7 @@
#include "buffer.h"
#include "numpyos.h"
+#include "arrayobject.h"
/*************************************************************************
**************** Implement Buffer Protocol ****************************
@@ -57,14 +58,10 @@ array_getreadbuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr)
static Py_ssize_t
array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr)
{
- if (PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) {
- return array_getreadbuf(self, segment, (void **) ptrptr);
- }
- else {
- PyErr_SetString(PyExc_ValueError, "array cannot be "
- "accessed as a writeable buffer");
+ if (PyArray_FailUnlessWriteable(self, "buffer source array") < 0) {
return -1;
}
+ return array_getreadbuf(self, segment, (void **) ptrptr);
}
static Py_ssize_t
@@ -632,10 +629,21 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
PyErr_SetString(PyExc_ValueError, "ndarray is not C-contiguous");
goto fail;
}
- if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE &&
- !PyArray_ISWRITEABLE(self)) {
- PyErr_SetString(PyExc_ValueError, "ndarray is not writeable");
- goto fail;
+ if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE) {
+ if (PyArray_FailUnlessWriteable(self, "buffer source array") < 0) {
+ goto fail;
+ }
+ }
+ /*
+ * If a read-only buffer is requested on a read-write array, we return a
+ * read-write buffer, which is dubious behavior. But that's why this call
+ * is guarded by PyArray_ISWRITEABLE rather than (flags &
+ * PyBUF_WRITEABLE).
+ */
+ if (PyArray_ISWRITEABLE(self)) {
+ if (array_might_be_written(self) < 0) {
+ goto fail;
+ }
}
if (view == NULL) {
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 12c2ccad5..311e2f863 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1310,7 +1310,9 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
r = PyArray_NewFromDescr(&PyArray_Type, descr,
nd, shape, strides, view->buf,
flags, NULL);
- ((PyArrayObject_fields *)r)->base = memoryview;
+ if (PyArray_SetBaseObject((PyArrayObject *)r, memoryview) < 0) {
+ goto fail;
+ }
PyArray_UpdateFlags((PyArrayObject *)r, NPY_ARRAY_UPDATE_ALL);
*out = r;
@@ -1348,9 +1350,8 @@ PyArray_GetArrayParamsFromObjectEx(PyObject *op,
/* If op is an array */
if (PyArray_Check(op)) {
- if (writeable && !PyArray_ISWRITEABLE((PyArrayObject *)op)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to array");
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)op, "array") < 0) {
return -1;
}
Py_INCREF(op);
@@ -1419,9 +1420,8 @@ PyArray_GetArrayParamsFromObjectEx(PyObject *op,
/* If op supports the PEP 3118 buffer interface */
if (!PyBytes_Check(op) && !PyUnicode_Check(op) &&
_array_from_buffer_3118(op, (PyObject **)out_arr) == 0) {
- if (writeable && !PyArray_ISWRITEABLE(*out_arr)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to PEP 3118 buffer");
+ if (writeable
+ && PyArray_FailUnlessWriteable(*out_arr, "PEP 3118 buffer") < 0) {
Py_DECREF(*out_arr);
return -1;
}
@@ -1440,9 +1440,9 @@ PyArray_GetArrayParamsFromObjectEx(PyObject *op,
}
}
if (tmp != Py_NotImplemented) {
- if (writeable && !PyArray_ISWRITEABLE((PyArrayObject *)tmp)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to array interface object");
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
+ "array interface object") < 0) {
Py_DECREF(tmp);
return -1;
}
@@ -1462,9 +1462,9 @@ PyArray_GetArrayParamsFromObjectEx(PyObject *op,
if (!writeable) {
tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
if (tmp != Py_NotImplemented) {
- if (writeable && !PyArray_ISWRITEABLE((PyArrayObject *)tmp)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to array interface object");
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
+ "array interface object") < 0) {
Py_DECREF(tmp);
return -1;
}
@@ -2032,13 +2032,6 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
order = NPY_CORDER;
}
- if ((flags & NPY_ARRAY_UPDATEIFCOPY) &&
- (!PyArray_ISWRITEABLE(arr))) {
- Py_DECREF(newtype);
- PyErr_SetString(PyExc_ValueError,
- "cannot copy back to a read-only array");
- return NULL;
- }
if ((flags & NPY_ARRAY_ENSUREARRAY)) {
subok = 0;
}
@@ -2078,14 +2071,11 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
}
if (flags & NPY_ARRAY_UPDATEIFCOPY) {
- /*
- * Don't use PyArray_SetBaseObject, because that compresses
- * the chain of bases.
- */
Py_INCREF(arr);
- ((PyArrayObject_fields *)ret)->base = (PyObject *)arr;
- PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY);
- PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEABLE);
+ if (PyArray_SetUpdateIfCopyBase(ret, arr) < 0) {
+ Py_DECREF(ret);
+ return NULL;
+ }
}
}
/*
@@ -2599,9 +2589,7 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
NPY_BEGIN_THREADS_DEF;
- if (!PyArray_ISWRITEABLE(dst)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to array");
+ if (PyArray_FailUnlessWriteable(dst, "destination array") < 0) {
return -1;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 6a209635f..8fe968e7e 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -41,6 +41,7 @@ NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS] = {
"Y",
"M",
"W",
+ "<invalid>",
"D",
"h",
"m",
@@ -949,6 +950,8 @@ static NPY_DATETIMEUNIT _multiples_table[16][4] = {
{NPY_FR_W, NPY_FR_D, NPY_FR_h},
{7, 168, 10080}, /* NPY_FR_W */
{NPY_FR_D, NPY_FR_h, NPY_FR_m},
+ {0}, /* Gap for removed NPY_FR_B */
+ {0},
{24, 1440, 86400}, /* NPY_FR_D */
{NPY_FR_h, NPY_FR_m, NPY_FR_s},
{60, 3600}, /* NPY_FR_h */
@@ -1045,6 +1048,7 @@ _datetime_factors[] = {
1, /* Years - not used */
1, /* Months - not used */
7, /* Weeks -> Days */
+ 1, /* Business Days - was removed but a gap still exists in the enum */
24, /* Days -> Hours */
60, /* Hours -> Minutes */
60, /* Minutes -> Seconds */
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index babbbc9b7..5090dc39c 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -2530,24 +2530,36 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
metadata = NULL;
}
- Py_XDECREF(self->metadata);
if (PyDataType_ISDATETIME(self) && (metadata != NULL)) {
- PyArray_DatetimeMetaData *dt_data;
+ PyObject *old_metadata, *errmsg;
+ PyArray_DatetimeMetaData temp_dt_data;
- /* The Python metadata */
- self->metadata = PyTuple_GET_ITEM(metadata, 0);
+ if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) {
+ errmsg = PyUString_FromString("Invalid datetime dtype (metadata, c_metadata): ");
+ PyUString_ConcatAndDel(&errmsg, PyObject_Repr(metadata));
+ PyErr_SetObject(PyExc_ValueError, errmsg);
+ return NULL;
+ }
- /* The datetime metadata */
- dt_data = &(((PyArray_DatetimeDTypeMetaData *)self->c_metadata)->meta);
if (convert_datetime_metadata_tuple_to_datetime_metadata(
PyTuple_GET_ITEM(metadata, 1),
- dt_data) < 0) {
+ &temp_dt_data) < 0) {
return NULL;
}
+
+ old_metadata = self->metadata;
+ self->metadata = PyTuple_GET_ITEM(metadata, 0);
+ memcpy((char *) &((PyArray_DatetimeDTypeMetaData *)self->c_metadata)->meta,
+ (char *) &temp_dt_data,
+ sizeof(PyArray_DatetimeMetaData));
+ Py_XINCREF(self->metadata);
+ Py_XDECREF(old_metadata);
}
else {
+ PyObject *old_metadata = self->metadata;
self->metadata = metadata;
- Py_XINCREF(metadata);
+ Py_XINCREF(self->metadata);
+ Py_XDECREF(old_metadata);
}
Py_INCREF(Py_None);
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index a414f1f37..065a207d2 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -16,6 +16,7 @@
#include "scalartypes.h"
#include "descriptor.h"
#include "getset.h"
+#include "arrayobject.h"
/******************* array attribute get and set routines ******************/
@@ -260,6 +261,10 @@ array_interface_get(PyArrayObject *self)
return NULL;
}
+ if (array_might_be_written(self) < 0) {
+ return NULL;
+ }
+
/* dataptr */
obj = array_dataptr_get(self);
PyDict_SetItemString(dict, "data", obj);
@@ -355,9 +360,12 @@ array_data_set(PyArrayObject *self, PyObject *op)
PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
}
Py_DECREF(PyArray_BASE(self));
+ ((PyArrayObject_fields *)self)->base = NULL;
}
Py_INCREF(op);
- ((PyArrayObject_fields *)self)->base = op;
+ if (PyArray_SetBaseObject(self, op) < 0) {
+ return -1;
+ }
((PyArrayObject_fields *)self)->data = buf;
((PyArrayObject_fields *)self)->flags = NPY_ARRAY_CARRAY;
if (!writeable) {
@@ -554,6 +562,11 @@ array_struct_get(PyArrayObject *self)
PyArrayInterface *inter;
PyObject *ret;
+ if (PyArray_ISWRITEABLE(self)) {
+ if (array_might_be_written(self) < 0) {
+ return NULL;
+ }
+ }
inter = (PyArrayInterface *)PyArray_malloc(sizeof(PyArrayInterface));
if (inter==NULL) {
return PyErr_NoMemory();
@@ -816,6 +829,7 @@ array_flat_set(PyArrayObject *self, PyObject *val)
"Cannot delete array flat iterator");
return -1;
}
+ if (PyArray_FailUnlessWriteable(self, "array") < 0) return -1;
typecode = PyArray_DESCR(self);
Py_INCREF(typecode);
arr = (PyArrayObject *)PyArray_FromAny(val, typecode,
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index b7440e7c1..3cbd5391c 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -1092,9 +1092,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis_orig);
return -1;
}
- if (!PyArray_ISWRITEABLE(op)) {
- PyErr_SetString(PyExc_RuntimeError,
- "attempted sort on unwriteable array.");
+ if (PyArray_FailUnlessWriteable(op, "sort array") < 0) {
return -1;
}
@@ -1821,7 +1819,11 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2,
/*NUMPY_API
* Diagonal
*
- * As of NumPy 1.7, this function always returns a view into 'self'.
+ * In NumPy versions prior to 1.7, this function always returned a copy of
+ * the diagonal array. In 1.7, the code has been updated to compute a view
+ * onto 'self', but it still copies this array before returning, as well as
+ * setting the internal WARN_ON_WRITE flag. In a future version, it will
+ * simply return a view onto self.
*/
NPY_NO_EXPORT PyObject *
PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2)
@@ -1837,6 +1839,7 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2)
PyArrayObject *ret;
PyArray_Descr *dtype;
npy_intp ret_shape[NPY_MAXDIMS], ret_strides[NPY_MAXDIMS];
+ PyObject *copy;
if (ndim < 2) {
PyErr_SetString(PyExc_ValueError,
@@ -1967,7 +1970,13 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2)
fret->flags |= NPY_ARRAY_MASKNA;
}
- return (PyObject *)ret;
+ /* For backwards compatibility, during the deprecation period: */
+ copy = PyArray_NewCopy(ret, NPY_KEEPORDER);
+ if (!copy) {
+ return NULL;
+ }
+ PyArray_ENABLEFLAGS((PyArrayObject *)copy, NPY_ARRAY_WARN_ON_WRITE);
+ return copy;
}
/*NUMPY_API
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 581ffebe7..1933b145d 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1024,6 +1024,9 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
"Cannot delete iterator elements");
return -1;
}
+
+ if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0)
+ return -1;
if (ind == Py_Ellipsis) {
ind = PySlice_New(NULL, NULL, NULL);
@@ -1203,10 +1206,13 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
/* Two options:
* 1) underlying array is contiguous
- * -- return 1-d wrapper around it
- * 2) underlying array is not contiguous
- * -- make new 1-d contiguous array with updateifcopy flag set
- * to copy back to the old array
+ * -- return 1-d wrapper around it
+ * 2) underlying array is not contiguous
+ * -- make new 1-d contiguous array with updateifcopy flag set
+ * to copy back to the old array
+ *
+ * If underlying array is readonly, then we make the output array readonly
+ * and updateifcopy does not apply.
*/
size = PyArray_SIZE(it->ao);
Py_INCREF(PyArray_DESCR(it->ao));
@@ -1239,14 +1245,16 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
Py_DECREF(ret);
return NULL;
}
- /*
- * Don't use PyArray_SetBaseObject, because that compresses
- * the chain of bases.
- */
- Py_INCREF(it->ao);
- ((PyArrayObject_fields *)ret)->base = (PyObject *)it->ao;
- PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY);
- PyArray_CLEARFLAGS(it->ao, NPY_ARRAY_WRITEABLE);
+ if (PyArray_ISWRITEABLE(it->ao)) {
+ Py_INCREF(it->ao);
+ if (PyArray_SetUpdateIfCopyBase(ret, it->ao) < 0) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+ }
+ else {
+ PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
+ }
}
return ret;
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 60fa3b216..7034f5797 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -171,9 +171,7 @@ array_ass_big_item(PyArrayObject *self, npy_intp i, PyObject *v)
return -1;
}
- if (!PyArray_ISWRITEABLE(self)) {
- PyErr_SetString(PyExc_RuntimeError,
- "array is not writeable");
+ if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
return -1;
}
@@ -1493,9 +1491,7 @@ array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
"cannot delete array elements");
return -1;
}
- if (!PyArray_ISWRITEABLE(self)) {
- PyErr_SetString(PyExc_RuntimeError,
- "array is not writeable");
+ if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
return -1;
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 29035ad1e..dacfabcbd 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -532,10 +532,7 @@ PyArray_Byteswap(PyArrayObject *self, npy_bool inplace)
copyswapn = PyArray_DESCR(self)->f->copyswapn;
if (inplace) {
- if (!PyArray_ISWRITEABLE(self)) {
- PyErr_SetString(PyExc_RuntimeError,
- "Cannot byte-swap in-place on a " \
- "read-only array");
+ if (PyArray_FailUnlessWriteable(self, "array to be byte-swapped") < 0) {
return NULL;
}
size = PyArray_SIZE(self);
@@ -741,9 +738,7 @@ array_setscalar(PyArrayObject *self, PyObject *args)
"itemset must have at least one argument");
return NULL;
}
- if (!PyArray_ISWRITEABLE(self)) {
- PyErr_SetString(PyExc_RuntimeError,
- "array is not writeable");
+ if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
return NULL;
}
@@ -1683,6 +1678,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyArray_CLEARFLAGS(self, NPY_ARRAY_OWNDATA);
}
Py_XDECREF(PyArray_BASE(self));
+ fa->base = NULL;
PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
@@ -1755,7 +1751,9 @@ array_setstate(PyArrayObject *self, PyObject *args)
Py_DECREF(rawdata);
}
else {
- fa->base = rawdata;
+ if (PyArray_SetBaseObject(self, rawdata) < 0) {
+ return NULL;
+ }
}
}
else {
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index f1d0c5c38..180c55063 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1098,11 +1098,9 @@ npyiter_prepare_one_operand(PyArrayObject **op,
if (PyArray_Check(*op)) {
npy_uint32 tmp;
- if (((*op_itflags) & NPY_OP_ITFLAG_WRITE) &&
- (!PyArray_CHKFLAGS(*op, NPY_ARRAY_WRITEABLE))) {
- PyErr_SetString(PyExc_ValueError,
- "Operand was a non-writeable array, but "
- "flagged as writeable");
+ if ((*op_itflags) & NPY_OP_ITFLAG_WRITE
+ && PyArray_FailUnlessWriteable(*op, "operand array with iterator "
+ "write flag set") < 0) {
return 0;
}
if (!(flags & NPY_ITER_ZEROSIZE_OK) && PyArray_SIZE(*op) == 0) {
@@ -2984,15 +2982,11 @@ npyiter_allocate_arrays(NpyIter *iter,
}
/* If the data will be written to, set UPDATEIFCOPY */
if (op_itflags[iop] & NPY_OP_ITFLAG_WRITE) {
- /*
- * Don't use PyArray_SetBaseObject, because that compresses
- * the chain of bases.
- */
Py_INCREF(op[iop]);
- ((PyArrayObject_fields *)temp)->base =
- (PyObject *)op[iop];
- PyArray_ENABLEFLAGS(temp, NPY_ARRAY_UPDATEIFCOPY);
- PyArray_CLEARFLAGS(op[iop], NPY_ARRAY_WRITEABLE);
+ if (PyArray_SetUpdateIfCopyBase(temp, op[iop]) < 0) {
+ Py_DECREF(temp);
+ return 0;
+ }
}
Py_DECREF(op[iop]);
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 52b7b4000..276937be1 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -784,6 +784,7 @@ static char *_datetime_verbose_strings[NPY_DATETIME_NUMUNITS] = {
"years",
"months",
"weeks",
+ "<invalid>",
"days",
"hours",
"minutes",
diff --git a/numpy/core/src/multiarray/sequence.c b/numpy/core/src/multiarray/sequence.c
index 004aa2d78..cb3b30b3a 100644
--- a/numpy/core/src/multiarray/sequence.c
+++ b/numpy/core/src/multiarray/sequence.c
@@ -119,9 +119,7 @@ array_ass_slice(PyArrayObject *self, Py_ssize_t ilow,
"cannot delete array elements");
return -1;
}
- if (!PyArray_ISWRITEABLE(self)) {
- PyErr_SetString(PyExc_RuntimeError,
- "array is not writeable");
+ if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
return -1;
}
tmp = (PyArrayObject *)array_slice(self, ilow, ihigh);
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index 3ece2b874..90bbf5fb6 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -18,6 +18,13 @@ double npy_copysign(double x, double y)
}
#endif
+/*
+ The below code is provided for compilers which do not yet provide C11 compatibility (gcc 4.5 and older)
+ */
+#ifndef LDBL_TRUE_MIN
+#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
+
#if !defined(HAVE_DECL_SIGNBIT)
#include "_signbit.c"
@@ -205,7 +212,7 @@ npy_longdouble _nextl(npy_longdouble x, int p)
}
if(ihx <= 0x0360000000000000LL) { /* x <= LDBL_MIN */
u = math_opt_barrier (x);
- x -= __LDBL_DENORM_MIN__;
+ x -= LDBL_TRUE_MIN;
if (ihx < 0x0360000000000000LL
|| (hx > 0 && (npy_int64) lx <= 0)
|| (hx < 0 && (npy_int64) lx > 1)) {
@@ -229,14 +236,14 @@ npy_longdouble _nextl(npy_longdouble x, int p)
}
if(ihx <= 0x0360000000000000LL) { /* x <= LDBL_MIN */
u = math_opt_barrier (x);
- x += __LDBL_DENORM_MIN__;
+ x += LDBL_TRUE_MIN;
if (ihx < 0x0360000000000000LL
|| (hx > 0 && (npy_int64) lx < 0 && lx != 0x8000000000000001LL)
|| (hx < 0 && (npy_int64) lx >= 0)) {
u = u * u;
math_force_eval (u); /* raise underflow flag */
}
- if (x == 0.0L) /* handle negative __LDBL_DENORM_MIN__ case */
+ if (x == 0.0L) /* handle negative LDBL_TRUE_MIN case */
x = -0.0L;
return x;
}
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 93f63038a..3b62e150f 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -795,9 +795,8 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
}
/* If it's an array, can use it */
if (PyArray_Check(obj)) {
- if (!PyArray_ISWRITEABLE((PyArrayObject *)obj)) {
- PyErr_SetString(PyExc_ValueError,
- "return array is not writeable");
+ if (PyArray_FailUnlessWriteable((PyArrayObject *)obj,
+ "output array") < 0) {
return -1;
}
Py_INCREF(obj);
@@ -894,9 +893,9 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
}
if (PyArray_Check(value)) {
- if (!PyArray_ISWRITEABLE((PyArrayObject *)value)) {
- PyErr_SetString(PyExc_ValueError,
- "return array is not writeable");
+ const char *name = "output array";
+ PyArrayObject *value_arr = (PyArrayObject *)value;
+ if (PyArray_FailUnlessWriteable(value_arr, name) < 0) {
goto fail;
}
Py_INCREF(value);
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index cb62182cd..fbde86b57 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -547,6 +547,14 @@ class TestDateTime(TestCase):
"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(asbytes(pkl)), np.dtype('>M8[us]'))
+ def test_setstate(self):
+ "Verify that datetime dtype __setstate__ can handle bad arguments"
+ dt = np.dtype('>M8[us]')
+ assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+ assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+ assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
def test_dtype_promotion(self):
# datetime <op> datetime computes the metadata gcd
# timedelta <op> timedelta computes the metadata gcd
diff --git a/numpy/core/tests/test_maskna.py b/numpy/core/tests/test_maskna.py
index 4efd5abcf..c19cd70c2 100644
--- a/numpy/core/tests/test_maskna.py
+++ b/numpy/core/tests/test_maskna.py
@@ -1417,11 +1417,9 @@ def test_array_maskna_diagonal():
a.shape = (2,3)
a[0,1] = np.NA
- # Should produce a view into a
res = a.diagonal()
- assert_(res.base is a)
assert_(res.flags.maskna)
- assert_(not res.flags.ownmaskna)
+ assert_(res.flags.ownmaskna)
assert_equal(res, [0, 4])
res = a.diagonal(-1)
@@ -1593,6 +1591,8 @@ def test_array_maskna_linspace_logspace():
assert_(b.flags.maskna)
+from numpy.testing import dec
+@dec.knownfailureif(True, "eye is not implemented for maskna")
def test_array_maskna_eye_identity():
# np.eye
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index c00930f6d..e9c7a9c4f 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -29,8 +29,8 @@ class TestFlags(TestCase):
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
- self.assertRaises(RuntimeError, runstring, 'self.a[0] = 3', mydict)
- self.assertRaises(RuntimeError, runstring, 'self.a[0:1].itemset(3)', mydict)
+ self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
+ self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
@@ -792,6 +792,148 @@ class TestMethods(TestCase):
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
+ def test_diagonal(self):
+ a = np.arange(12).reshape((3, 4))
+ assert_equal(a.diagonal(), [0, 5, 10])
+ assert_equal(a.diagonal(0), [0, 5, 10])
+ assert_equal(a.diagonal(1), [1, 6, 11])
+ assert_equal(a.diagonal(-1), [4, 9])
+
+ b = np.arange(8).reshape((2, 2, 2))
+ assert_equal(b.diagonal(), [[0, 6], [1, 7]])
+ assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
+ assert_equal(b.diagonal(1), [[2], [3]])
+ assert_equal(b.diagonal(-1), [[4], [5]])
+ assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
+ assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
+ assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
+ assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
+ # Order of axis argument doesn't matter:
+ assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
+
+ def test_diagonal_deprecation(self):
+ import warnings
+ from numpy.testing.utils import WarningManager
+ def collect_warning_types(f, *args, **kwargs):
+ ctx = WarningManager(record=True)
+ warning_log = ctx.__enter__()
+ warnings.simplefilter("always")
+ try:
+ f(*args, **kwargs)
+ finally:
+ ctx.__exit__()
+ return [w.category for w in warning_log]
+ a = np.arange(9).reshape(3, 3)
+ # All the different functions raise a warning, but not an error, and
+ # 'a' is not modified:
+ assert_equal(collect_warning_types(a.diagonal().__setitem__, 0, 10),
+ [DeprecationWarning])
+ assert_equal(a, np.arange(9).reshape(3, 3))
+ assert_equal(collect_warning_types(np.diagonal(a).__setitem__, 0, 10),
+ [DeprecationWarning])
+ assert_equal(a, np.arange(9).reshape(3, 3))
+ assert_equal(collect_warning_types(np.diag(a).__setitem__, 0, 10),
+ [DeprecationWarning])
+ assert_equal(a, np.arange(9).reshape(3, 3))
+ # Views also warn
+ d = np.diagonal(a)
+ d_view = d.view()
+ assert_equal(collect_warning_types(d_view.__setitem__, 0, 10),
+ [DeprecationWarning])
+ # But the write goes through:
+ assert_equal(d[0], 10)
+ # Only one warning per call to diagonal, though (even if there are
+ # multiple views involved):
+ assert_equal(collect_warning_types(d.__setitem__, 0, 10),
+ [])
+
+ # Other ways of accessing the data also warn:
+ # .data goes via the C buffer API, gives a read-write
+ # buffer/memoryview. We don't warn until tp_getwritebuf is actually
+ # called, which is not until the buffer is written to.
+ have_memoryview = (hasattr(__builtins__, "memoryview")
+ or "memoryview" in __builtins__)
+ def get_data_and_write(getter):
+ buf_or_memoryview = getter(a.diagonal())
+ if (have_memoryview and isinstance(buf_or_memoryview, memoryview)):
+ buf_or_memoryview[0] = np.array(1)
+ else:
+ buf_or_memoryview[0] = "x"
+ assert_equal(collect_warning_types(get_data_and_write,
+ lambda d: d.data),
+ [DeprecationWarning])
+ if hasattr(np, "getbuffer"):
+ assert_equal(collect_warning_types(get_data_and_write,
+ np.getbuffer),
+ [DeprecationWarning])
+ # PEP 3118:
+ if have_memoryview:
+ assert_equal(collect_warning_types(get_data_and_write, memoryview),
+ [DeprecationWarning])
+ # Void dtypes can give us a read-write buffer, but only in Python 2:
+ import sys
+ if sys.version_info[0] < 3:
+ aV = np.empty((3, 3), dtype="V10")
+ assert_equal(collect_warning_types(aV.diagonal().item, 0),
+ [DeprecationWarning])
+ # XX it seems that direct indexing of a void object returns a void
+ # scalar, which ignores not just WARN_ON_WRITE but even WRITEABLE.
+ # i.e. in this:
+ # a = np.empty(10, dtype="V10")
+ # a.flags.writeable = False
+ # buf = a[0].item()
+ # 'buf' ends up as a writeable buffer. I guess no-one actually
+ # uses void types like this though...
+ # __array_interface also lets a data pointer get away from us
+ log = collect_warning_types(getattr, a.diagonal(),
+ "__array_interface__")
+ assert_equal(log, [DeprecationWarning])
+ # ctypeslib goes via __array_interface__:
+ try:
+ # may not exist in python 2.4:
+ import ctypes
+ except ImportError:
+ pass
+ else:
+ log = collect_warning_types(np.ctypeslib.as_ctypes, a.diagonal())
+ assert_equal(log, [DeprecationWarning])
+ # __array_struct__
+ log = collect_warning_types(getattr, a.diagonal(), "__array_struct__")
+ assert_equal(log, [DeprecationWarning])
+
+ # Make sure that our recommendation to silence the warning by copying
+ # the array actually works:
+ diag_copy = a.diagonal().copy()
+ assert_equal(collect_warning_types(diag_copy.__setitem__, 0, 10),
+ [])
+ # There might be people who get a spurious warning because they are
+ # extracting a buffer, but then use that buffer in a read-only
+ # fashion. And they might get cranky at having to create a superfluous
+ # copy just to work around this spurious warning. A reasonable
+ # solution would be for them to mark their usage as read-only, and
+ # thus safe for both past and future PyArray_Diagonal
+ # semantics. So let's make sure that setting the diagonal array to
+ # non-writeable will suppress these warnings:
+ ro_diag = a.diagonal()
+ ro_diag.flags.writeable = False
+ assert_equal(collect_warning_types(getattr, ro_diag, "data"), [])
+ # __array_interface__ has no way to communicate read-onlyness --
+ # effectively all __array_interface__ arrays are assumed to be
+ # writeable :-(
+ # ro_diag = a.diagonal()
+ # ro_diag.flags.writeable = False
+ # assert_equal(collect_warning_types(getattr, ro_diag,
+ # "__array_interface__"), [])
+ if hasattr(__builtins__, "memoryview"):
+ ro_diag = a.diagonal()
+ ro_diag.flags.writeable = False
+ assert_equal(collect_warning_types(memoryview, ro_diag), [])
+ ro_diag = a.diagonal()
+ ro_diag.flags.writeable = False
+ assert_equal(collect_warning_types(getattr, ro_diag,
+ "__array_struct__"), [])
+
+
def test_ravel(self):
a = np.array([[0,1],[2,3]])
assert_equal(a.ravel(), [0,1,2,3])
@@ -1531,6 +1673,52 @@ class TestFromBuffer(object):
yield self.tst_basic, asbytes(''), np.array([]), {}
+class TestFlat(TestCase):
+ def setUp(self):
+ a0 = arange(20.0)
+ a = a0.reshape(4,5)
+ a0.shape = (4,5)
+ a.flags.writeable = False
+ self.a = a
+ self.b = a[::2,::2]
+ self.a0 = a0
+ self.b0 = a0[::2,::2]
+
+ def test_contiguous(self):
+ testpassed = False
+ try:
+ self.a.flat[12] = 100.0
+ except ValueError:
+ testpassed = True
+ assert testpassed
+ assert self.a.flat[12] == 12.0
+
+ def test_discontiguous(self):
+ testpassed = False
+ try:
+ self.b.flat[4] = 100.0
+ except ValueError:
+ testpassed = True
+ assert testpassed
+ assert self.b.flat[4] == 12.0
+
+ def test___array__(self):
+ c = self.a.flat.__array__()
+ d = self.b.flat.__array__()
+ e = self.a0.flat.__array__()
+ f = self.b0.flat.__array__()
+
+ assert c.flags.writeable is False
+ assert d.flags.writeable is False
+ assert e.flags.writeable is True
+ assert f.flags.writeable is True
+
+ assert c.flags.updateifcopy is False
+ assert d.flags.updateifcopy is False
+ assert e.flags.updateifcopy is False
+ assert f.flags.updateifcopy is True
+ assert f.base is self.b0
+
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 3d18a9b98..7a316ac3a 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -865,7 +865,6 @@ def test_iter_array_cast():
i = None
assert_equal(a[2,1,1], -12.5)
- # Unsafe cast 'f4' -> 'i4'
a = np.arange(6, dtype='i4')[::-2]
i = nditer(a, [],
[['writeonly','updateifcopy']],
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 2de5c6193..6d9e65697 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,12 +1,13 @@
__docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
- 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
- 'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
- 'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
- 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
- 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
- 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
- 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc']
+ 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
+ 'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
+ 'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
+ 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
+ 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
+ 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
+ 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
+ 'add_newdoc_ufunc']
import warnings
import types
@@ -350,7 +351,6 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
- outliers = zeros(N, int)
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
@@ -369,7 +369,6 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
- shape = []
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
@@ -1698,80 +1697,9 @@ def disp(mesg, device=None, linefeed=True):
device.flush()
return
-# return number of input arguments and
-# number of default arguments
-
-def _get_nargs(obj):
- import re
-
- terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
- r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
- def _convert_to_int(strval):
- try:
- result = int(strval)
- except ValueError:
- if strval=='zero':
- result = 0
- elif strval=='one':
- result = 1
- elif strval=='two':
- result = 2
- # How high to go? English only?
- else:
- raise
- return result
-
- if not callable(obj):
- raise TypeError(
- "Object is not callable.")
- if sys.version_info[0] >= 3:
- # inspect currently fails for binary extensions
- # like math.cos. So fall back to other methods if
- # it fails.
- import inspect
- try:
- spec = inspect.getargspec(obj)
- nargs = len(spec.args)
- if spec.defaults:
- ndefaults = len(spec.defaults)
- else:
- ndefaults = 0
- if inspect.ismethod(obj):
- nargs -= 1
- return nargs, ndefaults
- except:
- pass
-
- if hasattr(obj,'func_code'):
- fcode = obj.func_code
- nargs = fcode.co_argcount
- if obj.func_defaults is not None:
- ndefaults = len(obj.func_defaults)
- else:
- ndefaults = 0
- if isinstance(obj, types.MethodType):
- nargs -= 1
- return nargs, ndefaults
-
- try:
- obj()
- return 0, 0
- except TypeError, msg:
- m = terr.match(str(msg))
- if m:
- nargs = _convert_to_int(m.group('exargs'))
- ndefaults = _convert_to_int(m.group('gargs'))
- if isinstance(obj, types.MethodType):
- nargs -= 1
- return nargs, ndefaults
-
- raise ValueError(
- "failed to determine the number of arguments for %s" % (obj))
-
-
class vectorize(object):
"""
- vectorize(pyfunc, otypes='', doc=None)
+ vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
@@ -1794,13 +1722,30 @@ class vectorize(object):
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
- The docstring for the function. If None, the docstring will be the
- `pyfunc` one.
    + The docstring for the function. If `None`, the docstring will be
    + ``pyfunc.__doc__``.
+ excluded : set, optional
+ Set of strings or integers representing the positional or keyword
+ arguments for which the function will not be vectorized. These will be
+ passed directly to `pyfunc` unmodified.
+
+ .. versionadded:: 1.7.0
+
+ cache : bool, optional
+ If `True`, then cache the first function call that determines the number
+ of outputs if `otypes` is not provided.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ vectorized : callable
+ Vectorized function.
Examples
--------
>>> def myfunc(a, b):
- ... \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
+ ... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
@@ -1830,78 +1775,169 @@ class vectorize(object):
>>> type(out[0])
<type 'numpy.float64'>
+ The `excluded` argument can be used to prevent vectorizing over certain
+ arguments. This can be useful for array-like arguments of a fixed length
+ such as the coefficients for a polynomial as in `polyval`:
+
+ >>> def mypolyval(p, x):
+ ... _p = list(p)
+ ... res = _p.pop(0)
+ ... while _p:
+ ... res = res*x + _p.pop(0)
+ ... return res
+ >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
+ >>> vpolyval(p=[1, 2, 3], x=[0, 1])
+ array([3, 6])
+
+ Positional arguments may also be excluded by specifying their position:
+
+ >>> vpolyval.excluded.add(0)
+ >>> vpolyval([1, 2, 3], x=[0, 1])
+ array([3, 6])
+
+ Notes
+ -----
+ The `vectorize` function is provided primarily for convenience, not for
+ performance. The implementation is essentially a for loop.
+
+ If `otypes` is not specified, then a call to the function with the first
+ argument will be used to determine the number of outputs. The results of
+ this call will be cached if `cache` is `True` to prevent calling the
+ function twice. However, to implement the cache, the original function must
+ be wrapped which will slow down subsequent calls, so only do this if your
+ function is expensive.
+
+ The new keyword argument interface and `excluded` argument support further
+ degrades performance.
"""
- def __init__(self, pyfunc, otypes='', doc=None):
- self.thefunc = pyfunc
- self.ufunc = None
- nin, ndefault = _get_nargs(pyfunc)
- if nin == 0 and ndefault == 0:
- self.nin = None
- self.nin_wo_defaults = None
- else:
- self.nin = nin
- self.nin_wo_defaults = nin - ndefault
- self.nout = None
+ def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False):
+ self.pyfunc = pyfunc
+ self.cache = cache
+
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
+
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
- raise ValueError(
- "invalid otype specified")
+ raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
- raise ValueError(
- "Invalid otype specification")
- self.lastcallargs = 0
-
- def __call__(self, *args):
- # get number of outputs and output types by calling
- # the function on the first entries of args
- nargs = len(args)
- if self.nin:
- if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
- raise ValueError(
- "Invalid number of arguments")
-
- # we need a new ufunc if this is being called with more arguments.
- if (self.lastcallargs != nargs):
- self.lastcallargs = nargs
- self.ufunc = None
- self.nout = None
-
- if self.nout is None or self.otypes == '':
- newargs = []
- for arg in args:
- newargs.append(asarray(arg).flat[0])
- theout = self.thefunc(*newargs)
- if isinstance(theout, tuple):
- self.nout = len(theout)
+ raise ValueError("Invalid otype specification")
+
+ # Excluded variable support
+ if excluded is None:
+ excluded = set()
+ self.excluded = set(excluded)
+
+ if self.otypes and not self.excluded:
+ self._ufunc = None # Caching to improve default performance
+
+ def __call__(self, *args, **kwargs):
+ """
+ Return arrays with the results of `pyfunc` broadcast (vectorized) over
+ `args` and `kwargs` not in `excluded`.
+ """
+ excluded = self.excluded
+ if not kwargs and not excluded:
+ func = self.pyfunc
+ vargs = args
+ else:
+ # The wrapper accepts only positional arguments: we use `names` and
+ # `inds` to mutate `the_args` and `kwargs` to pass to the original
+ # function.
+ nargs = len(args)
+
+ names = [_n for _n in kwargs if _n not in excluded]
+ inds = [_i for _i in range(nargs) if _i not in excluded]
+ the_args = list(args)
+ def func(*vargs):
+ for _n, _i in enumerate(inds):
+ the_args[_i] = vargs[_n]
+ kwargs.update(zip(names, vargs[len(inds):]))
+ return self.pyfunc(*the_args, **kwargs)
+
+ vargs = [args[_i] for _i in inds]
+ vargs.extend([kwargs[_n] for _n in names])
+
+ return self._vectorize_call(func=func, args=vargs)
+
+ def _get_ufunc_and_otypes(self, func, args):
+ """Return (ufunc, otypes)."""
+ # frompyfunc will fail if args is empty
+ assert args
+
+ if self.otypes:
+ otypes = self.otypes
+ nout = len(otypes)
+
+ # Note logic here: We only *use* self._ufunc if func is self.pyfunc
+ # even though we set self._ufunc regardless.
+ if func is self.pyfunc and self._ufunc is not None:
+ ufunc = self._ufunc
+ else:
+ ufunc = self._ufunc = frompyfunc(func, len(args), nout)
+ else:
+ # Get number of outputs and output types by calling the function on
+ # the first entries of args. We also cache the result to prevent
+ # the subsequent call when the ufunc is evaluated.
+ # Assumes that ufunc first evaluates the 0th elements in the input
+ # arrays (the input values are not checked to ensure this)
+ inputs = [asarray(_a).flat[0] for _a in args]
+ outputs = func(*inputs)
+
+ # Performance note: profiling indicates that -- for simple functions
+ # at least -- this wrapping can almost double the execution time.
+ # Hence we make it optional.
+ if self.cache:
+ _cache = [outputs]
+ def _func(*vargs):
+ if _cache:
+ return _cache.pop()
+ else:
+ return func(*vargs)
+ else:
+ _func = func
+
+ if isinstance(outputs, tuple):
+ nout = len(outputs)
else:
- self.nout = 1
- theout = (theout,)
- if self.otypes == '':
- otypes = []
- for k in range(self.nout):
- otypes.append(asarray(theout[k]).dtype.char)
- self.otypes = ''.join(otypes)
-
- # Create ufunc if not already created
- if (self.ufunc is None):
- self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
-
- # Convert to object arrays first
- newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
- if self.nout == 1:
- _res = array(self.ufunc(*newargs),copy=False,
- subok=True,dtype=self.otypes[0])
+ nout = 1
+ outputs = (outputs,)
+
+ otypes = ''.join([asarray(outputs[_k]).dtype.char
+ for _k in range(nout)])
+
+ # Performance note: profiling indicates that creating the ufunc is
+ # not a significant cost compared with wrapping so it seems not
+ # worth trying to cache this.
+ ufunc = frompyfunc(_func, len(args), nout)
+
+ return ufunc, otypes
+
+ def _vectorize_call(self, func, args):
+ """Vectorized call to `func` over positional `args`."""
+ if not args:
+ _res = func()
else:
- _res = tuple([array(x,copy=False,subok=True,dtype=c) \
- for x, c in zip(self.ufunc(*newargs), self.otypes)])
+ ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
+
+ # Convert args to object arrays first
+ inputs = [array(_a, copy=False, subok=True, dtype=object)
+ for _a in args]
+
+ outputs = ufunc(*inputs)
+
+ if ufunc.nout == 1:
+ _res = array(outputs,
+ copy=False, subok=True, dtype=otypes[0])
+ else:
+ _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
+ for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
@@ -2595,7 +2631,7 @@ def i0(x):
References
----------
- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions," in
+ .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index c29f3a6d3..6f2aa1d02 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -658,9 +658,8 @@ s_ = IndexExpression(maketuple=False)
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
-def fill_diagonal(a, val):
- """
- Fill the main diagonal of the given array of any dimensionality.
+def fill_diagonal(a, val, wrap=False):
+ """Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
@@ -675,6 +674,10 @@ def fill_diagonal(a, val):
Value to be written on the diagonal, its type must be compatible with
that of the array a.
+    wrap : bool For tall matrices in NumPy versions up to 1.6.2, the
+      diagonal "wrapped" after N columns. You can have this behavior
+      with this option. This affects only tall matrices.
+
See also
--------
diag_indices, diag_indices_from
@@ -716,13 +719,42 @@ def fill_diagonal(a, val):
[0, 0, 0],
[0, 0, 4]])
+ # tall matrices no wrap
+ >>> a = np.zeros((5, 3),int)
+ >>> fill_diagonal(a, 4)
+ array([[4, 0, 0],
+ [0, 4, 0],
+ [0, 0, 4],
+ [0, 0, 0],
+ [0, 0, 0]])
+
+ # tall matrices wrap
+ >>> a = np.zeros((5, 3),int)
+  >>> fill_diagonal(a, 4, wrap=True)
+ array([[4, 0, 0],
+ [0, 4, 0],
+ [0, 0, 4],
+ [0, 0, 0],
+ [4, 0, 0]])
+
+ # wide matrices
+ >>> a = np.zeros((3, 5),int)
+ >>> fill_diagonal(a, 4)
+ array([[4, 0, 0, 0, 0],
+ [0, 4, 0, 0, 0],
+ [0, 0, 4, 0, 0]])
+
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
+ end = None
if a.ndim == 2:
# Explicit, fast formula for the common case. For 2-d arrays, we
# accept rectangular ones.
step = a.shape[1] + 1
+        # This is needed so that tall matrices do not have the diagonal wrap.
+ if not wrap:
+ end = a.shape[1] * a.shape[1]
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
@@ -731,7 +763,7 @@ def fill_diagonal(a, val):
step = 1 + (cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
- a.flat[::step] = val
+ a.flat[:end:step] = val
def diag_indices(n, ndim=2):
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 7245b8962..95b32e47c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -274,7 +274,7 @@ class TestGradient(TestCase):
assert_array_equal(gradient(v), dx)
def test_badargs(self):
- # for 2D array, gradient can take 0,1, or 2 extra args
+ # for 2D array, gradient can take 0, 1, or 2 extra args
x = np.array([[1, 1], [3, 4]])
assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
np.array([1., 1.]), np.array([1., 1.]))
@@ -394,12 +394,12 @@ class TestVectorize(TestCase):
def foo(a, b=1):
return a + b
f = vectorize(foo)
- args = np.array([1,2,3])
+ args = np.array([1, 2, 3])
r1 = f(args)
- r2 = np.array([2,3,4])
+ r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(args, 2)
- r2 = np.array([3,4,5])
+ r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords_no_func_code(self):
@@ -412,6 +412,107 @@ class TestVectorize(TestCase):
except:
raise AssertionError()
+ def test_keywords2_ticket_2100(self):
+ r"""Test kwarg support: enhancement ticket 2100"""
+ import math
+ def foo(a, b=1):
+ return a + b
+ f = vectorize(foo)
+ args = np.array([1, 2, 3])
+ r1 = f(a=args)
+ r2 = np.array([2, 3, 4])
+ assert_array_equal(r1, r2)
+ r1 = f(b=1, a=args)
+ assert_array_equal(r1, r2)
+ r1 = f(args, b=2)
+ r2 = np.array([3, 4, 5])
+ assert_array_equal(r1, r2)
+
+ def test_keywords3_ticket_2100(self):
+ """Test excluded with mixed positional and kwargs: ticket 2100"""
+ def mypolyval(x, p):
+ _p = list(p)
+ res = _p.pop(0)
+ while _p:
+ res = res*x + _p.pop(0)
+ return res
+ vpolyval = np.vectorize(mypolyval, excluded=['p',1])
+ ans = [3, 6]
+ assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
+ assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
+ assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
+
+ def test_keywords4_ticket_2100(self):
+ """Test vectorizing function with no positional args."""
+ @vectorize
+ def f(**kw):
+ res = 1.0
+ for _k in kw:
+ res *= kw[_k]
+ return res
+ assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
+
+ def test_keywords5_ticket_2100(self):
+ """Test vectorizing function with no kwargs args."""
+ @vectorize
+ def f(*v):
+ return np.prod(v)
+ assert_array_equal(f([1, 2], [3, 4]), [3, 8])
+
+ def test_coverage1_ticket_2100(self):
+ def foo():
+ return 1
+ f = vectorize(foo)
+ assert_array_equal(f(), 1)
+
+ def test_assigning_docstring(self):
+ def foo(x):
+ return x
+ doc = "Provided documentation"
+ f = vectorize(foo, doc=doc)
+ assert_equal(f.__doc__, doc)
+
+ def test_UnboundMethod_ticket_1156(self):
+ """Regression test for issue 1156"""
+ class Foo:
+ b = 2
+ def bar(self, a):
+ return a**self.b
+ assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
+ np.arange(9)**2)
+ assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
+ np.arange(9)**2)
+
+ def test_execution_order_ticket_1487(self):
+ """Regression test for dependence on execution order: issue 1487"""
+ f1 = vectorize(lambda x: x)
+ res1a = f1(np.arange(3))
+ res1b = f1(np.arange(0.1, 3))
+ f2 = vectorize(lambda x: x)
+ res2b = f2(np.arange(0.1, 3))
+ res2a = f2(np.arange(3))
+ assert_equal(res1a, res2a)
+ assert_equal(res1b, res2b)
+
+ def test_string_ticket_1892(self):
+ """Test vectorization over strings: issue 1892."""
+ f = np.vectorize(lambda x:x)
+ s = '0123456789'*10
+ assert_equal(s, f(s))
+ #z = f(np.array([s,s]))
+ #assert_array_equal([s,s], f(s))
+
+ def test_cache(self):
+ """Ensure that vectorized func called exactly once per argument."""
+ _calls = [0]
+ @vectorize
+ def f(x):
+ _calls[0] += 1
+ return x**2
+ f.cache = True
+ x = np.arange(5)
+ assert_array_equal(f(x), x*x)
+ assert_equal(_calls[0], len(x))
class TestDigitize(TestCase):
def test_forward(self):
@@ -430,17 +531,17 @@ class TestDigitize(TestCase):
assert_(np.all(digitize(x, bin) != 0))
def test_right_basic(self):
- x = [1,5,4,10,8,11,0]
- bins = [1,5,10]
- default_answer = [1,2,1,3,2,3,0]
+ x = [1, 5, 4, 10, 8, 11, 0]
+ bins = [1, 5, 10]
+ default_answer = [1, 2, 1, 3, 2, 3, 0]
assert_array_equal(digitize(x, bins), default_answer)
- right_answer = [0,1,1,2,2,3,0]
+ right_answer = [0, 1, 1, 2, 2, 3, 0]
assert_array_equal(digitize(x, bins, True), right_answer)
def test_right_open(self):
x = np.arange(-6, 5)
bins = np.arange(-6, 4)
- assert_array_equal(digitize(x,bins,True), np.arange(11))
+ assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_reverse(self):
x = np.arange(5, -6, -1)
@@ -598,10 +699,10 @@ class TestHistogram(TestCase):
def test_one_bin(self):
# Ticket 632
hist, edges = histogram([1, 2, 3, 4], [1, 2])
- assert_array_equal(hist, [2, ])
+ assert_array_equal(hist, [2,])
assert_array_equal(edges, [1, 2])
assert_raises(ValueError, histogram, [1, 2], bins=0)
- h, e = histogram([1,2], bins=1)
+ h, e = histogram([1, 2], bins=1)
assert_equal(h, np.array([2]))
assert_allclose(e, np.array([1., 2.]))
@@ -630,7 +731,7 @@ class TestHistogram(TestCase):
# Check with non-constant bin widths
v = np.arange(10)
- bins = [0,1,3,6,10]
+ bins = [0, 1, 3, 6, 10]
a, b = histogram(v, bins, density=True)
assert_array_equal(a, .1)
assert_equal(np.sum(a*diff(b)), 1)
@@ -638,13 +739,13 @@ class TestHistogram(TestCase):
# Variale bin widths are especially useful to deal with
# infinities.
v = np.arange(10)
- bins = [0,1,3,6,np.inf]
+ bins = [0, 1, 3, 6, np.inf]
a, b = histogram(v, bins, density=True)
- assert_array_equal(a, [.1,.1,.1,0.])
+ assert_array_equal(a, [.1, .1, .1, 0.])
# Taken from a bug report from N. Becker on the numpy-discussion
# mailing list Aug. 6, 2010.
- counts, dmy = np.histogram([1,2,3,4], [0.5,1.5,np.inf], density=True)
+ counts, dmy = np.histogram([1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
assert_equal(counts, [.25, 0])
def test_outliers(self):
@@ -709,12 +810,12 @@ class TestHistogram(TestCase):
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
# Check weights with non-uniform bin widths
- a,b = histogram(np.arange(9), [0,1,3,6,10], \
- weights=[2,1,1,1,1,1,1,1,1], density=True)
+ a, b = histogram(np.arange(9), [0, 1, 3, 6, 10], \
+ weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
assert_almost_equal(a, [.2, .1, .1, .075])
def test_empty(self):
- a, b = histogram([], bins=([0,1]))
+ a, b = histogram([], bins=([0, 1]))
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
@@ -792,7 +893,7 @@ class TestHistogramdd(TestCase):
assert_array_equal(edges[0], np.array([-0.5, 0. , 0.5]))
def test_empty(self):
- a, b = histogramdd([[], []], bins=([0,1], [0,1]))
+ a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, np.array([[ 0.]]))
a, b = np.histogramdd([[], [], []], bins=2)
assert_array_max_ulp(a, np.zeros((2, 2, 2)))
@@ -1011,7 +1112,7 @@ class TestCorrCoef(TestCase):
class TestCov(TestCase):
def test_basic(self):
x = np.array([[0, 2], [1, 1], [2, 0]]).T
- assert_allclose(np.cov(x), np.array([[ 1.,-1.], [-1.,1.]]))
+ assert_allclose(np.cov(x), np.array([[ 1., -1.], [-1., 1.]]))
def test_empty(self):
assert_equal(cov(np.array([])).size, 0)
@@ -1162,7 +1263,7 @@ class TestBincount(TestCase):
def test_empty(self):
x = np.array([], dtype=int)
y = np.bincount(x)
- assert_array_equal(x,y)
+ assert_array_equal(x, y)
def test_empty_with_minlength(self):
x = np.array([], dtype=int)
@@ -1182,10 +1283,10 @@ class TestInterp(TestCase):
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
- assert_equal(interp([-1, 0, 1], [0], [1]), [1,1,1])
- assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0,1,1])
- assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1,1,0])
- assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0,1,0])
+ assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1])
+ assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1])
+ assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0])
+ assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0, 1, 0])
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
@@ -1255,10 +1356,10 @@ class TestAdd_newdoc_ufunc(TestCase):
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
- assert_raises(ValueError, add_newdoc_ufunc,np.add, "blah")
+ assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah")
def test_string_arg(self):
- assert_raises(TypeError, add_newdoc_ufunc,np.add, 3)
+ assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 2c6500a57..beda2d146 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -159,6 +159,44 @@ def test_fill_diagonal():
[0, 5, 0],
[0, 0, 5]]))
+ #Test tall matrix
+ a = zeros((10, 3),int)
+ fill_diagonal(a, 5)
+ yield (assert_array_equal, a,
+ array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]]))
+
+ #Test tall matrix wrap
+ a = zeros((10, 3),int)
+ fill_diagonal(a, 5, True)
+ yield (assert_array_equal, a,
+ array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0]]))
+
+ #Test wide matrix
+ a = zeros((3, 10),int)
+ fill_diagonal(a, 5)
+ yield (assert_array_equal, a,
+ array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
+
# The same function can operate on a 4-d array:
a = zeros((3, 3, 3, 3), int)
fill_diagonal(a, 4)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 58d8250a1..eab8f867a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -210,19 +210,28 @@ def eye(N, M=None, k=0, dtype=float, maskna=False):
if M is None:
M = N
m = zeros((N, M), dtype=dtype, maskna=maskna)
- diagonal(m, k)[...] = 1
+ if k >= M:
+ return m
+ if k >= 0:
+ i = k
+ else:
+ i = (-k) * M
+ m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
- As of NumPy 1.7, extracting a diagonal always returns a view into `v`.
+ See the more detailed documentation for ``numpy.diagonal`` if you use this
+ function to extract a diagonal and wish to write to the resulting array;
+ whether it returns a copy or a view depends on what version of numpy you
+ are using.
Parameters
----------
v : array_like
- If `v` is a 2-D array, return a view of its `k`-th diagonal.
+ If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index a57cdf5c1..a6f252067 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -5718,6 +5718,12 @@ class MaskedConstant(MaskedArray):
def flatten(self):
return masked_array([self._data], dtype=float, mask=[True])
+ def __reduce__(self):
+ """Override of MaskedArray's __reduce__.
+ """
+ return (self.__class__, ())
+
+
masked = masked_singleton = MaskedConstant()
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 0175c1213..58f81b071 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -385,6 +385,16 @@ class TestMaskedArray(TestCase):
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
+ def test_pickling_maskedconstant(self):
+ "Test pickling MaskedConstant"
+
+ import cPickle
+ mc = np.ma.masked
+ mc_pickled = cPickle.loads(mc.dumps())
+ assert_equal(mc_pickled._baseclass, mc._baseclass)
+ assert_equal(mc_pickled._mask, mc._mask)
+ assert_equal(mc_pickled._data, mc._data)
+
def test_pickling_wstructured(self):
"Tests pickling w/ structured array"
import cPickle
diff --git a/numpy/numarray/_capi.c b/numpy/numarray/_capi.c
index fee07d79d..78187c50e 100644
--- a/numpy/numarray/_capi.c
+++ b/numpy/numarray/_capi.c
@@ -1077,9 +1077,12 @@ NA_OutputArray(PyObject *a, NumarrayType t, int requires)
PyArray_Descr *dtype;
PyArrayObject *ret;
- if (!PyArray_Check(a) || !PyArray_ISWRITEABLE((PyArrayObject *)a)) {
+ if (!PyArray_Check(a)) {
PyErr_Format(PyExc_TypeError,
- "NA_OutputArray: only writeable arrays work for output.");
+ "NA_OutputArray: only arrays work for output.");
+ return NULL;
+ }
+ if (PyArray_FailUnlessWriteable((PyArrayObject *)a, "output array") < 0) {
return NULL;
}
@@ -1098,12 +1101,10 @@ NA_OutputArray(PyObject *a, NumarrayType t, int requires)
PyArray_DIMS((PyArrayObject *)a),
dtype, 0);
Py_INCREF(a);
- if (PyArray_SetBaseObject(ret, a) < 0) {
+ if (PyArray_SetUpdateIfCopyBase(ret, a) < 0) {
Py_DECREF(ret);
return NULL;
}
- PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY);
- PyArray_CLEARFLAGS((PyArrayObject *)a, NPY_ARRAY_WRITEABLE);
return ret;
}
@@ -1127,9 +1128,7 @@ NA_IoArray(PyObject *a, NumarrayType t, int requires)
/* Guard against non-writable, but otherwise satisfying requires.
In this case, shadow == a.
*/
- if (!PyArray_ISWRITABLE(shadow)) {
- PyErr_Format(PyExc_TypeError,
- "NA_IoArray: I/O array must be writable array");
+    if (PyArray_FailUnlessWriteable(shadow, "input/output array") < 0) {
PyArray_XDECREF_ERR(shadow);
return NULL;
}
@@ -2488,13 +2487,10 @@ _setFromPythonScalarCore(PyArrayObject *a, long offset, PyObject*value, int entr
static int
NA_setFromPythonScalar(PyArrayObject *a, long offset, PyObject *value)
{
- if (PyArray_FLAGS(a) & NPY_ARRAY_WRITEABLE)
- return _setFromPythonScalarCore(a, offset, value, 0);
- else {
- PyErr_Format(
- PyExc_ValueError, "NA_setFromPythonScalar: assigment to readonly array buffer");
+ if (PyArray_FailUnlessWriteable(a, "array") < 0) {
return -1;
}
+ return _setFromPythonScalarCore(a, offset, value, 0);
}
diff --git a/numpy/version.py.in b/numpy/version.py.in
index ec9380595..e466c1ede 100644
--- a/numpy/version.py.in
+++ b/numpy/version.py.in
@@ -1,9 +1,8 @@
-# THIS FILE IS GENERATED FROM NUMPY BENTO SCRIPTS
-short_version = "$version"
-version = "$version"
-full_version = "$full_version"
-git_revision = "$git_revision"
-release = $is_released
+short_version = $VERSION
+version = $VERSION
+full_version = $FULL_VERSION
+git_revision = $GIT_REVISION
+release = $IS_RELEASED
if not release:
version = full_version