summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--doc/release/1.10.0-notes.rst18
-rw-r--r--doc/source/reference/arrays.dtypes.rst12
-rw-r--r--numpy/core/src/multiarray/compiled_base.c191
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c28
-rw-r--r--numpy/core/src/multiarray/descriptor.c8
-rw-r--r--numpy/core/tests/test_datetime.py10
-rw-r--r--numpy/core/tests/test_dtype.py45
-rw-r--r--numpy/distutils/ccompiler.py2
-rw-r--r--numpy/lib/stride_tricks.py4
9 files changed, 171 insertions, 147 deletions
diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index 6a6bbd4c6..305fe0010 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -91,7 +91,7 @@ New Features
Reading extra flags from site.cfg
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Previously customization of compilation of dependency libraries and numpy
+Previously customization of compilation of dependency libraries and numpy
itself was only accomplishable via code changes in the distutils package.
Now numpy.distutils reads in the following extra flags from each group of the
*site.cfg*:
@@ -278,3 +278,19 @@ Because we are deprecating the ``bias`` argument to ``ma.corrcoef``, we also
deprecate the use of the ``allow_masked`` argument as a positional argument,
as its position will change with the removal of ``bias``. ``allow_masked``
will in due course become a keyword-only argument.
+
+dtype string representation changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Since 1.6, creating a dtype object from its string representation, e.g.
+``'f4'``, would issue a deprecation warning if the size did not correspond
+to an existing type, and default to creating a dtype of the default size
+for the type. Starting with this release, this will now raise a ``TypeError``.
+
+The only exception is object dtypes, where both ``'O4'`` and ``'O8'`` will
+still issue a deprecation warning. This platform-dependent representation
+will raise an error in the next release.
+
+In preparation for this upcoming change, the string representation of an
+object dtype, i.e. ``np.dtype(object).str``, no longer includes the item
+size, i.e. will return ``'|O'`` instead of ``'|O4'`` or ``'|O8'`` as
+before.
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index a43c23218..28e2d4f82 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -46,7 +46,7 @@ Structured data types are formed by creating a data type whose
which it can be :ref:`accessed <arrays.indexing.fields>`. The parent data
type should be of sufficient size to contain all its fields; the
parent is nearly always based on the :class:`void` type which allows
-an arbitrary item size. Structured data types may also contain nested
+an arbitrary item size. Structured data types may also contain nested
structured sub-array data types in their fields.
.. index::
@@ -219,10 +219,10 @@ One-character strings
Array-protocol type strings (see :ref:`arrays.interface`)
The first character specifies the kind of data and the remaining
- characters specify the number of bytes per item. The item size may
- be ignored for some kinds (i.e., boolean, object), rounded to the
- next supported size (float, complex), or interpreted as the number
- of characters (Unicode). The supported kinds are
+ characters specify the number of bytes per item, except for Unicode,
+ where it is interpreted as the number of characters. The item size
+ must correspond to an existing type, or an error will be raised. The
+ supported kinds are
================ ========================
``'b'`` boolean
@@ -431,7 +431,7 @@ Type strings
Both arguments must be convertible to data-type objects in this
case. The *base_dtype* is the data-type object that the new
data-type builds on. This is how you could assign named fields to
- any built-in data-type object, as done in
+ any built-in data-type object, as done in
:ref:`record arrays <arrays.classes.rec>`.
.. admonition:: Example
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 825ca8c7a..aaedbac61 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -492,6 +492,8 @@ fail:
return NULL;
}
+#define LIKELY_IN_CACHE_SIZE 8
+
/** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1].
*
* If a starting index guess is in-range, the array values around this
@@ -512,12 +514,13 @@ fail:
* @return index
*/
static npy_intp
-binary_search_with_guess(double key, double arr [], npy_intp len,
- npy_intp guess)
+binary_search_with_guess(const npy_double key, const npy_double *arr,
+ npy_intp len, npy_intp guess)
{
npy_intp imin = 0;
npy_intp imax = len;
+ /* Handle keys outside of the arr range first */
if (key > arr[len - 1]) {
return len;
}
@@ -525,35 +528,63 @@ binary_search_with_guess(double key, double arr [], npy_intp len,
return -1;
}
- if (guess < 0) {
- guess = 0;
- }
- else if (guess >= len - 1) {
- guess = len - 2;
- }
+ /*
+ * It would seem that for the following code to work, 'len' should
+ * at least be 4. But because of the way 'guess' is normalized, it
+ * will always be set to 1 if len <= 4. Given that, and that keys
+ * outside of the 'arr' bounds have already been handled, and the
+ * order in which comparisons happen below, it should become obvious
+ * that it will work with any array of at least 2 items.
+ */
+ assert (len >= 2);
- /* check most likely values: guess, guess + 1, guess - 1 */
- if ((key > arr[guess]) && (key <= arr[guess + 1])) {
- return guess;
- }
- else if ((guess < len - 2) && (key > arr[guess + 1]) &&
- (key <= arr[guess + 2])) {
- return guess + 1;
+ if (guess > len - 3) {
+ guess = len - 3;
}
- else if ((guess > 1) && (key > arr[guess - 1]) &&
- (key <= arr[guess])) {
- return guess - 1;
+ if (guess < 1) {
+ guess = 1;
}
- /* may be able to restrict bounds to range likely to be in memory */
- if ((guess > 8) && (key > arr[guess - 8])) {
- imin = guess - 8;
+
+ /* check most likely values: guess - 1, guess, guess + 1 */
+ if (key <= arr[guess]) {
+ if (key <= arr[guess - 1]) {
+ imax = guess - 1;
+ /* last attempt to restrict search to items in cache */
+ if (guess > LIKELY_IN_CACHE_SIZE &&
+ key > arr[guess - LIKELY_IN_CACHE_SIZE]) {
+ imin = guess - LIKELY_IN_CACHE_SIZE;
+ }
+ }
+ else {
+ /* key > arr[guess - 1] */
+ return guess - 1;
+ }
}
- if ((guess < len - 9) && (key <= arr[guess + 8])) {
- imax = guess + 8;
+ else {
+ /* key > arr[guess] */
+ if (key <= arr[guess + 1]) {
+ return guess;
+ }
+ else {
+ /* key > arr[guess + 1] */
+ if (key <= arr[guess + 2]) {
+ return guess + 1;
+ }
+ else {
+ /* key > arr[guess + 2] */
+ imin = guess + 2;
+ /* last attempt to restrict search to items in cache */
+ if (guess < len - LIKELY_IN_CACHE_SIZE - 1 &&
+ key <= arr[guess + LIKELY_IN_CACHE_SIZE]) {
+ imax = guess + LIKELY_IN_CACHE_SIZE;
+ }
+ }
+ }
}
+
/* finally, find index by bisection */
while (imin < imax) {
- npy_intp imid = imin + ((imax - imin) >> 1);
+ const npy_intp imid = imin + ((imax - imin) >> 1);
if (key >= arr[imid]) {
imin = imid + 1;
}
@@ -564,6 +595,8 @@ binary_search_with_guess(double key, double arr [], npy_intp len,
return imin - 1;
}
+#undef LIKELY_IN_CACHE_SIZE
+
NPY_NO_EXPORT PyObject *
arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
{
@@ -571,12 +604,15 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
PyObject *fp, *xp, *x;
PyObject *left = NULL, *right = NULL;
PyArrayObject *afp = NULL, *axp = NULL, *ax = NULL, *af = NULL;
- npy_intp i, lenx, lenxp, j, jprev;
- double lval, rval;
- double *dy, *dx, *dz, *dres, *slopes;
+ npy_intp i, lenx, lenxp;
+ npy_double lval, rval;
+ const npy_double *dy, *dx, *dz;
+ npy_double *dres, *slopes = NULL;
static char *kwlist[] = {"x", "xp", "fp", "left", "right", NULL};
+ NPY_BEGIN_THREADS_DEF;
+
if (!PyArg_ParseTupleAndKeywords(args, kwdict, "OOO|OO", kwlist,
&x, &xp, &fp, &left, &right)) {
return NULL;
@@ -594,29 +630,29 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
if (ax == NULL) {
goto fail;
}
- lenxp = PyArray_DIMS(axp)[0];
+ lenxp = PyArray_SIZE(axp);
if (lenxp == 0) {
PyErr_SetString(PyExc_ValueError,
"array of sample points is empty");
goto fail;
}
- if (PyArray_DIMS(afp)[0] != lenxp) {
+ if (PyArray_SIZE(afp) != lenxp) {
PyErr_SetString(PyExc_ValueError,
"fp and xp are not of the same length.");
goto fail;
}
af = (PyArrayObject *)PyArray_SimpleNew(PyArray_NDIM(ax),
- PyArray_DIMS(ax), NPY_DOUBLE);
+ PyArray_DIMS(ax), NPY_DOUBLE);
if (af == NULL) {
goto fail;
}
lenx = PyArray_SIZE(ax);
- dy = (double *)PyArray_DATA(afp);
- dx = (double *)PyArray_DATA(axp);
- dz = (double *)PyArray_DATA(ax);
- dres = (double *)PyArray_DATA(af);
+ dy = (const npy_double *)PyArray_DATA(afp);
+ dx = (const npy_double *)PyArray_DATA(axp);
+ dz = (const npy_double *)PyArray_DATA(ax);
+ dres = (npy_double *)PyArray_DATA(af);
/* Get left and right fill values. */
if ((left == NULL) || (left == Py_None)) {
lval = dy[0];
@@ -628,7 +664,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
}
if ((right == NULL) || (right == Py_None)) {
- rval = dy[lenxp-1];
+ rval = dy[lenxp - 1];
}
else {
rval = PyFloat_AsDouble(right);
@@ -637,72 +673,67 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
}
- /* only pre-calculate slopes if there are relatively few of them. */
- j = jprev = 0;
- if (lenxp <= lenx) {
- slopes = (double *) PyArray_malloc((lenxp - 1)*sizeof(double));
- if (! slopes) {
- goto fail;
- }
- NPY_BEGIN_ALLOW_THREADS;
- for (i = 0; i < lenxp - 1; i++) {
- slopes[i] = (dy[i + 1] - dy[i])/(dx[i + 1] - dx[i]);
+ /* binary_search_with_guess needs at least a 2 item long array */
+ if (lenxp == 1) {
+ const npy_double xp_val = dx[0];
+ const npy_double fp_val = dy[0];
+
+ NPY_BEGIN_THREADS_THRESHOLDED(lenx);
+ for (i = 0; i < lenx; ++i) {
+ const npy_double x_val = dz[i];
+ dres[i] = (x_val < xp_val) ? lval :
+ ((x_val > xp_val) ? rval : fp_val);
}
- for (i = 0; i < lenx; i++) {
- const double x = dz[i];
+ NPY_END_THREADS;
+ }
+ else {
+ npy_intp j = 0;
- if (npy_isnan(x)) {
- dres[i] = x;
- continue;
+ /* only pre-calculate slopes if there are relatively few of them. */
+ if (lenxp <= lenx) {
+ slopes = PyArray_malloc((lenxp - 1) * sizeof(npy_double));
+ if (slopes == NULL) {
+ goto fail;
}
+ }
- j = binary_search_with_guess(x, dx, lenxp, jprev);
- jprev = j;
- if (j == -1) {
- dres[i] = lval;
- }
- else if (j == lenxp - 1) {
- dres[i] = dy[j];
- }
- else if (j == lenxp) {
- dres[i] = rval;
- }
- else {
- dres[i] = slopes[j]*(x - dx[j]) + dy[j];
+ NPY_BEGIN_THREADS;
+
+ if (slopes != NULL) {
+ for (i = 0; i < lenxp - 1; ++i) {
+ slopes[i] = (dy[i+1] - dy[i]) / (dx[i+1] - dx[i]);
}
}
- NPY_END_ALLOW_THREADS;
- PyArray_free(slopes);
- }
- else {
- NPY_BEGIN_ALLOW_THREADS;
- for (i = 0; i < lenx; i++) {
- const double x = dz[i];
- if (npy_isnan(x)) {
- dres[i] = x;
+ for (i = 0; i < lenx; ++i) {
+ const npy_double x_val = dz[i];
+
+ if (npy_isnan(x_val)) {
+ dres[i] = x_val;
continue;
}
- j = binary_search_with_guess(x, dx, lenxp, jprev);
- jprev = j;
+ j = binary_search_with_guess(x_val, dx, lenxp, j);
if (j == -1) {
dres[i] = lval;
}
- else if (j == lenxp - 1) {
- dres[i] = dy[j];
- }
else if (j == lenxp) {
dres[i] = rval;
}
+ else if (j == lenxp - 1) {
+ dres[i] = dy[j];
+ }
else {
- const double slope = (dy[j + 1] - dy[j])/(dx[j + 1] - dx[j]);
- dres[i] = slope*(x - dx[j]) + dy[j];
+ const npy_double slope = (slopes != NULL) ? slopes[j] :
+ (dy[j+1] - dy[j]) / (dx[j+1] - dx[j]);
+ dres[i] = slope*(x_val - dx[j]) + dy[j];
}
}
- NPY_END_ALLOW_THREADS;
+
+ NPY_END_THREADS;
}
+ PyArray_free(slopes);
Py_DECREF(afp);
Py_DECREF(axp);
Py_DECREF(ax);
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 096a363f1..95241f36c 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -1023,9 +1023,6 @@ NPY_NO_EXPORT int
PyArray_TypestrConvert(int itemsize, int gentype)
{
int newtype = NPY_NOTYPE;
- PyArray_Descr *temp;
- const char *msg = "Specified size is invalid for this data type.\n"
- "Size will be ignored in NumPy 1.7 but may throw an exception in future versions.";
switch (gentype) {
case NPY_GENBOOLLTR:
@@ -1179,31 +1176,6 @@ PyArray_TypestrConvert(int itemsize, int gentype)
break;
}
- /*
- * Raise deprecate warning if new type hasn't been
- * set yet and size char is invalid.
- * This should eventually be changed to an error in
- * future NumPy versions.
- */
- if (newtype == NPY_NOTYPE) {
- temp = PyArray_DescrFromType(gentype);
- if (temp != NULL) {
- if (temp->elsize != itemsize) {
- if (DEPRECATE(msg) < 0) {
- Py_DECREF(temp);
- return -1;
- }
-
- newtype = gentype;
- }
- else {
- newtype = gentype;
- }
-
- Py_DECREF(temp);
- }
- }
-
return newtype;
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index a7953b44d..2bb45a6e0 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1677,8 +1677,12 @@ arraydescr_protocol_typestr_get(PyArray_Descr *self)
if (self->type_num == NPY_UNICODE) {
size >>= 2;
}
-
- ret = PyUString_FromFormat("%c%c%d", endian, basic_, size);
+ if (self->type_num == NPY_OBJECT) {
+ ret = PyUString_FromFormat("%c%c", endian, basic_);
+ }
+ else {
+ ret = PyUString_FromFormat("%c%c%d", endian, basic_, size);
+ }
if (PyDataType_ISDATETIME(self)) {
PyArray_DatetimeMetaData *meta;
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 4e432f885..d406d8ddf 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -50,11 +50,11 @@ class TestDateTime(TestCase):
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
- assert_warns(DeprecationWarning, np.dtype, 'm4')
- assert_warns(DeprecationWarning, np.dtype, 'M7')
- assert_warns(DeprecationWarning, np.dtype, 'm7')
- assert_warns(DeprecationWarning, np.dtype, 'M16')
- assert_warns(DeprecationWarning, np.dtype, 'm16')
+ assert_raises(TypeError, np.dtype, 'm4')
+ assert_raises(TypeError, np.dtype, 'M7')
+ assert_raises(TypeError, np.dtype, 'm7')
+ assert_raises(TypeError, np.dtype, 'M16')
+ assert_raises(TypeError, np.dtype, 'm16')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 852660432..9040c262b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -50,38 +50,35 @@ class TestBuiltin(TestCase):
self.assertTrue(hash(left) == hash(right))
def test_invalid_types(self):
- # Make sure invalid type strings raise a warning.
- # For now, display a deprecation warning for invalid
- # type sizes. In the future this should be changed
- # to an exception.
-
- assert_warns(DeprecationWarning, np.dtype, 'O3')
- assert_warns(DeprecationWarning, np.dtype, 'O5')
- assert_warns(DeprecationWarning, np.dtype, 'O7')
- assert_warns(DeprecationWarning, np.dtype, 'b3')
- assert_warns(DeprecationWarning, np.dtype, 'h4')
- assert_warns(DeprecationWarning, np.dtype, 'I5')
- assert_warns(DeprecationWarning, np.dtype, 'e3')
- assert_warns(DeprecationWarning, np.dtype, 'f5')
+ # Make sure invalid type strings raise an error
+
+ assert_raises(TypeError, np.dtype, 'O3')
+ assert_raises(TypeError, np.dtype, 'O5')
+ assert_raises(TypeError, np.dtype, 'O7')
+ assert_raises(TypeError, np.dtype, 'b3')
+ assert_raises(TypeError, np.dtype, 'h4')
+ assert_raises(TypeError, np.dtype, 'I5')
+ assert_raises(TypeError, np.dtype, 'e3')
+ assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
- assert_warns(DeprecationWarning, np.dtype, 'g12')
+ assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
- assert_warns(DeprecationWarning, np.dtype, 'g16')
+ assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
- assert_warns(DeprecationWarning, np.dtype, 'l4')
- assert_warns(DeprecationWarning, np.dtype, 'L4')
+ assert_raises(TypeError, np.dtype, 'l4')
+ assert_raises(TypeError, np.dtype, 'L4')
else:
- assert_warns(DeprecationWarning, np.dtype, 'l8')
- assert_warns(DeprecationWarning, np.dtype, 'L8')
+ assert_raises(TypeError, np.dtype, 'l8')
+ assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
- assert_warns(DeprecationWarning, np.dtype, 'q4')
- assert_warns(DeprecationWarning, np.dtype, 'Q4')
+ assert_raises(TypeError, np.dtype, 'q4')
+ assert_raises(TypeError, np.dtype, 'Q4')
else:
- assert_warns(DeprecationWarning, np.dtype, 'q8')
- assert_warns(DeprecationWarning, np.dtype, 'Q8')
+ assert_raises(TypeError, np.dtype, 'q8')
+ assert_raises(TypeError, np.dtype, 'Q8')
def test_bad_param(self):
# Can't give a size that's too small
@@ -319,7 +316,7 @@ class TestSubarray(TestCase):
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
- #
+ #
class IntLike(object):
def __index__(self):
return 3
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 1148c58c1..37d1e7b4b 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -532,7 +532,7 @@ compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
"Intel C Compiler for 64-bit applications")
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
"Intel C Compiler for 32-bit applications on Windows")
-compiler_class['intelemw'] = ('intelem64tccompiler', 'IntelEM64TCCompilerW',
+compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
"Intel C Compiler for 64-bit applications on Windows")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index e7649cb60..c05d56e2f 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -97,6 +97,10 @@ def broadcast_to(array, shape, subok=False):
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
Examples
--------
>>> x = np.array([1, 2, 3])