38 files changed, 376 insertions, 202 deletions
diff --git a/doc/neps/index.rst b/doc/neps/index.rst index e26190b1f..8a2df4078 100644 --- a/doc/neps/index.rst +++ b/doc/neps/index.rst @@ -6,8 +6,7 @@ NumPy Enhancement Proposals (NEPs) describe proposed changes to NumPy. NEPs are modeled on Python Enhancement Proposals (PEPs), and are typically written up when large changes to NumPy are proposed. -This page provides an overview of all NEPs, making only a distinction between -the ones that have been implemented and those that have not been implemented. +This page provides an overview of all NEPs. Meta-NEPs (NEPs about NEPs or Processes) ---------------------------------------- @@ -19,31 +18,39 @@ Meta-NEPs (NEPs about NEPs or Processes) nep-template +Accepted NEPs, implementation in progress +----------------------------------------- + +.. toctree:: + :maxdepth: 1 + + nep-0014-dropping-python2.7-proposal + + Implemented NEPs ---------------- .. toctree:: :maxdepth: 1 - ufunc-overrides - generalized-ufuncs - new-iterator-ufunc - npy-format + nep-0001-npy-format + nep-0005-generalized-ufuncs + nep-0007-datetime-proposal + nep-0010-new-iterator-ufunc + nep-0013-ufunc-overrides -Other NEPs ----------- +Defunct NEPs +------------ .. toctree:: :maxdepth: 1 - missing-data - math_config_clean - groupby_additions - warnfix - newbugtracker - deferred-ufunc-evaluation - structured_array_extensions - datetime-proposal - datetime-proposal3 - dropping-python2.7-proposal + nep-0002-warnfix + nep-0003-math_config_clean + nep-0004-datetime-proposal3 + nep-0006-newbugtracker + nep-0008-groupby_additions + nep-0009-structured_array_extensions + nep-0011-deferred-ufunc-evaluation + nep-0012-missing-data diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index bfcfac23b..ae8603c62 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -91,8 +91,14 @@ The possible paths of the status of NEPs are as follows: All NEPs should be created with the ``Draft`` status. -Normally, a NEP is ``Accepted`` by consensus of all -interested Contributors. +Normally, a NEP is ``Accepted`` by consensus of all interested +Contributors. To verify that consensus has been reached, the NEP +author or another interested party should make a post on the +numpy-discussion mailing list proposing it for acceptance; if there +are no substantive objections after one week, the NEP can officially +be marked ``Accepted``, and a link to this post should be added to the +NEP for reference. + In unusual cases, the `NumPy Steering Council`_ may be asked to decide whether a controversial NEP is ``Accepted``. 
diff --git a/doc/neps/npy-format.rst b/doc/neps/nep-0001-npy-format.rst index 3f12e1bf1..3f12e1bf1 100644 --- a/doc/neps/npy-format.rst +++ b/doc/neps/nep-0001-npy-format.rst diff --git a/doc/neps/warnfix.rst b/doc/neps/nep-0002-warnfix.rst index 4b0a2a56e..4b0a2a56e 100644 --- a/doc/neps/warnfix.rst +++ b/doc/neps/nep-0002-warnfix.rst diff --git a/doc/neps/math_config_clean.rst b/doc/neps/nep-0003-math_config_clean.rst index 27c0adfa1..27c0adfa1 100644 --- a/doc/neps/math_config_clean.rst +++ b/doc/neps/nep-0003-math_config_clean.rst diff --git a/doc/neps/datetime-proposal3.rst b/doc/neps/nep-0004-datetime-proposal3.rst index fcfb39e54..fcfb39e54 100644 --- a/doc/neps/datetime-proposal3.rst +++ b/doc/neps/nep-0004-datetime-proposal3.rst diff --git a/doc/neps/generalized-ufuncs.rst b/doc/neps/nep-0005-generalized-ufuncs.rst index 98e436990..98e436990 100644 --- a/doc/neps/generalized-ufuncs.rst +++ b/doc/neps/nep-0005-generalized-ufuncs.rst diff --git a/doc/neps/newbugtracker.rst b/doc/neps/nep-0006-newbugtracker.rst index 5af633552..5af633552 100644 --- a/doc/neps/newbugtracker.rst +++ b/doc/neps/nep-0006-newbugtracker.rst diff --git a/doc/neps/datetime-proposal.rst b/doc/neps/nep-0007-datetime-proposal.rst index 76c361f4f..76c361f4f 100644 --- a/doc/neps/datetime-proposal.rst +++ b/doc/neps/nep-0007-datetime-proposal.rst diff --git a/doc/neps/groupby_additions.rst b/doc/neps/nep-0008-groupby_additions.rst index a86bdd642..a86bdd642 100644 --- a/doc/neps/groupby_additions.rst +++ b/doc/neps/nep-0008-groupby_additions.rst diff --git a/doc/neps/structured_array_extensions.rst b/doc/neps/nep-0009-structured_array_extensions.rst index a4248362c..a4248362c 100644 --- a/doc/neps/structured_array_extensions.rst +++ b/doc/neps/nep-0009-structured_array_extensions.rst diff --git a/doc/neps/new-iterator-ufunc.rst b/doc/neps/nep-0010-new-iterator-ufunc.rst index 7a9e7627c..7a9e7627c 100644 --- a/doc/neps/new-iterator-ufunc.rst +++ b/doc/neps/nep-0010-new-iterator-ufunc.rst diff --git a/doc/neps/deferred-ufunc-evaluation.rst b/doc/neps/nep-0011-deferred-ufunc-evaluation.rst index b00c0dd2d..b00c0dd2d 100644 --- a/doc/neps/deferred-ufunc-evaluation.rst +++ b/doc/neps/nep-0011-deferred-ufunc-evaluation.rst diff --git a/doc/neps/missing-data.rst b/doc/neps/nep-0012-missing-data.rst index 00a6034f4..00a6034f4 100644 --- a/doc/neps/missing-data.rst +++ b/doc/neps/nep-0012-missing-data.rst diff --git a/doc/neps/ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst index 90869e1ac..90869e1ac 100644 --- a/doc/neps/ufunc-overrides.rst +++ b/doc/neps/nep-0013-ufunc-overrides.rst diff --git a/doc/neps/dropping-python2.7-proposal.rst b/doc/neps/nep-0014-dropping-python2.7-proposal.rst index 3cfe50bd0..3cfe50bd0 100644 --- a/doc/neps/dropping-python2.7-proposal.rst +++ b/doc/neps/nep-0014-dropping-python2.7-proposal.rst diff --git a/doc/release/1.15.0-notes.rst b/doc/release/1.15.0-notes.rst index 87cb0f94e..4d19a953d 100644 --- a/doc/release/1.15.0-notes.rst +++ b/doc/release/1.15.0-notes.rst @@ -143,5 +143,16 @@ longer supported, as ``np.interp(object_array_nd)`` was never supported anyway. As a result of this change, the ``period`` argument can now be used on 0d arrays. +Allow dtype field names to be unicode in Python 2 +--------------------------------------------------------------- +Previously ``np.dtype([(u'name', float)])`` would raise a ``TypeError`` in +Python 2, as only bytestrings were allowed in field names. 
Now any unicode +string field names will be encoded with the ``ascii`` codec, raising a +``UnicodeEncodeError`` upon failure. + +This change makes it easier to write Python 2/3 compatible code using +``from __future__ import unicode_literals``, which previously would cause +string literal field names to raise a TypeError in Python 2. + Changes ======= diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index cbe95f51b..7dc73d6de 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -471,14 +471,15 @@ def _array2string(a, options, separator=' ', prefix=""): # The formatter __init__s in _get_format_function cannot deal with # subclasses yet, and we also need to avoid recursion issues in # _formatArray with subclasses which return 0d arrays in place of scalars - a = asarray(a) + data = asarray(a) + if a.shape == (): + a = data if a.size > options['threshold']: summary_insert = "..." - data = _leading_trailing(a, options['edgeitems']) + data = _leading_trailing(data, options['edgeitems']) else: summary_insert = "" - data = a # find the right formatting function for the array format_function = _get_format_function(data, **options) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index a1a30f7f0..b7634a930 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -120,13 +120,13 @@ #if defined(_MSC_VER) #define NPY_INLINE __inline #elif defined(__GNUC__) - #if defined(__STRICT_ANSI__) - #define NPY_INLINE __inline__ - #else - #define NPY_INLINE inline - #endif + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif #else - #define NPY_INLINE + #define NPY_INLINE #endif #ifdef HAVE___THREAD diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index 4604cc734..5be45affd 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -34,7 +34,7 @@ class memmap(ndarray): This class may at some point be turned into a factory function which returns a view into an mmap buffer. - Delete the memmap instance to close. + Delete the memmap instance to close the memmap file. Parameters diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c index f8305d115..ae4b81cf5 100644 --- a/numpy/core/src/multiarray/alloc.c +++ b/numpy/core/src/multiarray/alloc.c @@ -263,7 +263,7 @@ PyDataMem_RENEW(void *ptr, size_t size) result = realloc(ptr, size); if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); } PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (_PyDataMem_eventhook != NULL) { diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 12b51156e..56bd65eed 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -692,8 +692,8 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) npy_intp i, lenx, lenxp; const npy_double *dx, *dz; - const npy_cdouble *dy; - npy_cdouble lval, rval; + const npy_cdouble *dy; + npy_cdouble lval, rval; npy_cdouble *dres, *slopes = NULL; static char *kwlist[] = {"x", "xp", "fp", "left", "right", NULL}; @@ -740,7 +740,7 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) if (af == NULL) { goto fail; } - + dy = (const npy_cdouble *)PyArray_DATA(afp); dres = (npy_cdouble *)PyArray_DATA(af); /* Get left and right fill values. 
*/ @@ -757,7 +757,7 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) goto fail; } } - + if ((right == NULL) || (right == Py_None)) { rval = dy[lenxp - 1]; } @@ -771,12 +771,12 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) goto fail; } } - + /* binary_search_with_guess needs at least a 3 item long array */ if (lenxp == 1) { const npy_double xp_val = dx[0]; const npy_cdouble fp_val = dy[0]; - + NPY_BEGIN_THREADS_THRESHOLDED(lenx); for (i = 0; i < lenx; ++i) { const npy_double x_val = dz[i]; @@ -787,7 +787,7 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) } else { npy_intp j = 0; - + /* only pre-calculate slopes if there are relatively few of them. */ if (lenxp <= lenx) { slopes = PyArray_malloc((lenxp - 1) * sizeof(npy_cdouble)); @@ -795,9 +795,9 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) goto fail; } } - + NPY_BEGIN_THREADS; - + if (slopes != NULL) { for (i = 0; i < lenxp - 1; ++i) { const double inv_dx = 1.0 / (dx[i+1] - dx[i]); @@ -805,16 +805,16 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) slopes[i].imag = (dy[i+1].imag - dy[i].imag) * inv_dx; } } - + for (i = 0; i < lenx; ++i) { const npy_double x_val = dz[i]; - + if (npy_isnan(x_val)) { dres[i].real = x_val; dres[i].imag = 0.0; continue; } - + j = binary_search_with_guess(x_val, dx, lenxp, j); if (j == -1) { dres[i] = lval; @@ -833,17 +833,17 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) else { const npy_double inv_dx = 1.0 / (dx[j+1] - dx[j]); dres[i].real = (dy[j+1].real - dy[j].real)*(x_val - dx[j])* - inv_dx + dy[j].real; + inv_dx + dy[j].real; dres[i].imag = (dy[j+1].imag - dy[j].imag)*(x_val - dx[j])* - inv_dx + dy[j].imag; + inv_dx + dy[j].imag; } } } - + NPY_END_THREADS; - } + } PyArray_free(slopes); - + Py_DECREF(afp); Py_DECREF(axp); Py_DECREF(ax); diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index b4a0ce37d..8d983ffc9 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -437,7 +437,7 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } name = PyTuple_GET_ITEM(item, 0); - if (PyUString_Check(name)) { + if (PyBaseString_Check(name)) { title = NULL; } else if (PyTuple_Check(name)) { @@ -446,7 +446,7 @@ _convert_from_array_descr(PyObject *obj, int align) } title = PyTuple_GET_ITEM(name, 0); name = PyTuple_GET_ITEM(name, 1); - if (!PyUString_Check(name)) { + if (!PyBaseString_Check(name)) { goto fail; } } @@ -457,6 +457,17 @@ _convert_from_array_descr(PyObject *obj, int align) /* Insert name into nameslist */ Py_INCREF(name); +#if !defined(NPY_PY3K) + /* convert unicode name to ascii on Python 2 if possible */ + if (PyUnicode_Check(name)) { + PyObject *tmp = PyUnicode_AsASCIIString(name); + Py_DECREF(name); + if (tmp == NULL) { + goto fail; + } + name = tmp; + } +#endif if (PyUString_GET_SIZE(name) == 0) { Py_DECREF(name); if (title == NULL) { diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src index 0370ea6c7..bca690b4d 100644 --- a/numpy/core/src/npymath/ieee754.c.src +++ b/numpy/core/src/npymath/ieee754.c.src @@ -161,22 +161,22 @@ typedef union /* Get two 64 bit ints from a long double. 
*/ -#define GET_LDOUBLE_WORDS64(ix0,ix1,d) \ -do { \ - ieee854_long_double_shape_type qw_u; \ - qw_u.value = (d); \ - (ix0) = qw_u.parts64.msw; \ - (ix1) = qw_u.parts64.lsw; \ +#define GET_LDOUBLE_WORDS64(ix0,ix1,d) \ +do { \ + ieee854_long_double_shape_type qw_u; \ + qw_u.value = (d); \ + (ix0) = qw_u.parts64.msw; \ + (ix1) = qw_u.parts64.lsw; \ } while (0) /* Set a long double from two 64 bit ints. */ -#define SET_LDOUBLE_WORDS64(d,ix0,ix1) \ -do { \ - ieee854_long_double_shape_type qw_u; \ - qw_u.parts64.msw = (ix0); \ - qw_u.parts64.lsw = (ix1); \ - (d) = qw_u.value; \ +#define SET_LDOUBLE_WORDS64(d,ix0,ix1) \ +do { \ + ieee854_long_double_shape_type qw_u; \ + qw_u.parts64.msw = (ix0); \ + qw_u.parts64.lsw = (ix1); \ + (d) = qw_u.value; \ } while (0) static npy_longdouble _nextl(npy_longdouble x, int p) diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 88aaa3403..309df8545 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -5,7 +5,7 @@ import sys, gc import numpy as np from numpy.testing import ( - run_module_suite, assert_, assert_equal, assert_raises, assert_warns + run_module_suite, assert_, assert_equal, assert_raises, assert_warns, dec ) import textwrap @@ -34,6 +34,27 @@ class TestArrayRepr(object): " [(1,), (1,)]], dtype=[('a', '<i4')])" ) + @dec.knownfailureif(True, "See gh-10544") + def test_object_subclass(self): + class sub(np.ndarray): + def __new__(cls, inp): + obj = np.asarray(inp).view(cls) + return obj + + def __getitem__(self, ind): + ret = super(sub, self).__getitem__(ind) + return sub(ret) + + # test that object + subclass is OK: + x = sub([None, None]) + assert_equal(repr(x), 'sub([None, None], dtype=object)') + assert_equal(str(x), '[None None]') + + x = sub([None, sub([None, None])]) + assert_equal(repr(x), + 'sub([None, sub([None, None], dtype=object)], dtype=object)') + assert_equal(str(x), '[None sub([None, None], dtype=object)]') + def test_0d_object_subclass(self): # make sure that subclasses which return 0ds instead # of scalars don't cause infinite recursion in str @@ -73,15 +94,27 @@ class TestArrayRepr(object): assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') assert_equal(str(x), 'None') - # test that object + subclass is OK: - x = sub([None, None]) - assert_equal(repr(x), 'sub([None, None], dtype=object)') - assert_equal(str(x), '[None None]') - - x = sub([None, sub([None, None])]) - assert_equal(repr(x), - 'sub([None, sub([None, None], dtype=object)], dtype=object)') - assert_equal(str(x), '[None sub([None, None], dtype=object)]') + # gh-10663 + class DuckCounter(np.ndarray): + def __getitem__(self, item): + result = super(DuckCounter, self).__getitem__(item) + if not isinstance(result, DuckCounter): + result = result[...].view(DuckCounter) + return result + + def to_string(self): + return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many') + + def __str__(self): + if self.shape == (): + return self.to_string() + else: + fmt = {'all': lambda x: x.to_string()} + return np.array2string(self, formatter=fmt) + + dc = np.arange(5).view(DuckCounter) + assert_equal(str(dc), "[zero one two many many]") + assert_equal(str(dc[0]), "zero") def test_self_containing(self): arr0d = np.array(None) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index fba169ebf..43bfb0635 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4693,10 +4693,15 @@ class TestRecord(object): y['a'] def 
test_unicode_field_names(self): - # Unicode field names are not allowed on Py2 - title = u'b' - assert_raises(TypeError, np.dtype, [(title, int)]) - assert_raises(TypeError, np.dtype, [(('a', title), int)]) + # Unicode field names are converted to ascii on Python 2: + encodable_name = u'b' + assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') + assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') + + # But raises UnicodeEncodeError if it can't be encoded: + nonencodable_name = u'\uc3bc' + assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) + assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used diff --git a/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy/distutils/mingw/gfortran_vs2003_hack.c index 15ed7e686..485a675d8 100644 --- a/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ b/numpy/distutils/mingw/gfortran_vs2003_hack.c @@ -1,6 +1,6 @@ int _get_output_format(void) { - return 0; + return 0; } int _imp____lc_codepage = 0; diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 78802ef07..dc560f98e 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -346,8 +346,6 @@ def readfortrancode(ffile, dowithline=show, istop=1): cont = 0 finalline = '' ll = '' - commentline = re.compile( - r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)') includeline = re.compile( r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I) cont1 = re.compile(r'(?P<line>.*)&\s*\Z') @@ -391,17 +389,10 @@ def readfortrancode(ffile, dowithline=show, istop=1): break l = l[:-1] if not strictf77: - r = commentline.match(l) - if r: - l = r.group('line') + ' ' # Strip comments starting with `!' 
- rl = r.group('rest') - if rl[:4].lower() == 'f2py': # f2py directive - l = l + 4 * ' ' - r = commentline.match(rl[4:]) - if r: - l = l + r.group('line') - else: - l = l + rl[4:] + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') if l.strip() == '': # Skip empty line cont = 0 continue @@ -618,6 +609,25 @@ multilinepattern = re.compile( r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline' ## +def split_by_unquoted(line, characters): + """ + Splits the line into (line[:i], line[i:]), + where i is the index of first occurence of one of the characters + not within quotes, or len(line) if no such index exists + """ + assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" + r = re.compile( + r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)" + r"(?P<after>{char}.*)\Z".format( + not_quoted="[^\"'{}]".format(re.escape(characters)), + char="[{}]".format(re.escape(characters)), + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")')) + m = r.match(line) + if m: + d = m.groupdict() + return (d["before"], d["after"]) + return (line, "") def _simplifyargs(argsline): a = [] @@ -642,12 +652,17 @@ def crackline(line, reset=0): global filepositiontext, currentfilename, neededmodule, expectbegin global skipblocksuntil, skipemptyends, previous_context, gotnextfile - if ';' in line and not (f2pyenhancementspattern[0].match(line) or - multilinepattern[0].match(line)): - for l in line.split(';'): - # XXX: non-zero reset values need testing - assert reset == 0, repr(reset) - crackline(l, reset) + _, has_semicolon = split_by_unquoted(line, ";") + if has_semicolon and not (f2pyenhancementspattern[0].match(line) or + multilinepattern[0].match(line)): + # XXX: non-zero reset values need testing + assert reset == 0, repr(reset) + # split line on unquoted semicolons + line, semicolon_line = split_by_unquoted(line, ";") + while semicolon_line: + crackline(line, reset) + line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") + crackline(line, reset) return if reset < 0: groupcounter = 0 @@ -802,26 +817,22 @@ def markouterparen(line): def markoutercomma(line, comma=','): l = '' f = 0 - cc = '' - for c in line: - if (not cc or cc == ')') and c == '(': - f = f + 1 - cc = ')' - elif not cc and c == '\'' and (not l or l[-1] != '\\'): - f = f + 1 - cc = '\'' - elif c == cc: - f = f - 1 - if f == 0: - cc = '' - elif c == comma and f == 0: - l = l + '@' + comma + '@' - continue - l = l + c - assert not f, repr((f, line, l, cc)) + before, after = split_by_unquoted(line, comma + '()') + l += before + while after: + if (after[0] == comma) and (f == 0): + l += '@' + comma + '@' + else: + l += after[0] + if after[0] == '(': + f += 1 + elif after[0] == ')': + f -= 1 + before, after = split_by_unquoted(after[1:], comma + '()') + l += before + assert not f, repr((f, line, l)) return l - def unmarkouterparen(line): r = line.replace('@(@', '(').replace('@)@', ')') return r diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index a47733433..dd2484eb4 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -241,7 +241,7 @@ fortran_doc(FortranDataDef def) static FortranDataDef *save_def; /* save pointer of an allocatable array */ static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ - if (*f) /* In fortran f=allocated(d) */ + if (*f) /* In fortran f=allocated(d) */ save_def->data 
= d; else save_def->data = NULL; @@ -439,23 +439,23 @@ PyTypeObject PyFortran_Type = { PyVarObject_HEAD_INIT(NULL, 0) #else PyObject_HEAD_INIT(0) - 0, /*ob_size*/ + 0, /*ob_size*/ #endif "fortran", /*tp_name*/ sizeof(PyFortranObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ + 0, /*tp_itemsize*/ /* methods */ - (destructor)fortran_dealloc, /*tp_dealloc*/ - 0, /*tp_print*/ + (destructor)fortran_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ (getattrfunc)fortran_getattr, /*tp_getattr*/ (setattrfunc)fortran_setattr, /*tp_setattr*/ - 0, /*tp_compare/tp_reserved*/ - (reprfunc)fortran_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - (ternaryfunc)fortran_call, /*tp_call*/ + 0, /*tp_compare/tp_reserved*/ + (reprfunc)fortran_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + (ternaryfunc)fortran_call, /*tp_call*/ }; /************************* f2py_report_atexit *******************************/ @@ -656,17 +656,18 @@ PyArrayObject* array_from_pyobj(const int type_num, const int rank, const int intent, PyObject *obj) { - /* Note about reference counting - ----------------------------- - If the caller returns the array to Python, it must be done with - Py_BuildValue("N",arr). - Otherwise, if obj!=arr then the caller must call Py_DECREF(arr). - - Note on intent(cache,out,..) - --------------------- - Don't expect correct data when returning intent(cache) array. - - */ + /* + * Note about reference counting + * ----------------------------- + * If the caller returns the array to Python, it must be done with + * Py_BuildValue("N",arr). + * Otherwise, if obj!=arr then the caller must call Py_DECREF(arr). + * + * Note on intent(cache,out,..) + * --------------------- + * Don't expect correct data when returning intent(cache) array. + * + */ char mess[200]; PyArrayObject *arr = NULL; PyArray_Descr *descr; @@ -744,17 +745,17 @@ PyArrayObject* array_from_pyobj(const int type_num, if (check_and_fix_dimensions(arr, rank, dims)) { return NULL; } - /* - printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); - printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); - int i; - for (i=1;i<=16;i++) - printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); - */ + /* + printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); + printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); + int i; + for (i=1;i<=16;i++) + printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); + */ if ((! 
(intent & F2PY_INTENT_COPY)) && PyArray_ITEMSIZE(arr)==elsize && ARRAY_ISCOMPATIBLE(arr,type_num) - && F2PY_CHECK_ALIGNMENT(arr, intent) + && F2PY_CHECK_ALIGNMENT(arr, intent) ) { if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY(arr):PyArray_ISFARRAY(arr)) { if ((intent & F2PY_INTENT_OUT)) { @@ -780,8 +781,8 @@ PyArrayObject* array_from_pyobj(const int type_num, if (!(ARRAY_ISCOMPATIBLE(arr,type_num))) sprintf(mess+strlen(mess)," -- input '%c' not compatible to '%c'", PyArray_DESCR(arr)->type,typechar); - if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) - sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); PyErr_SetString(PyExc_ValueError,mess); return NULL; } @@ -858,14 +859,14 @@ static int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims) { /* - This function fills in blanks (that are -1's) in dims list using - the dimensions from arr. It also checks that non-blank dims will - match with the corresponding values in arr dimensions. - - Returns 0 if the function is successful. - - If an error condition is detected, an exception is set and 1 is returned. - */ + * This function fills in blanks (that are -1's) in dims list using + * the dimensions from arr. It also checks that non-blank dims will + * match with the corresponding values in arr dimensions. + * + * Returns 0 if the function is successful. + * + * If an error condition is detected, an exception is set and 1 is returned. + */ const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1; #ifdef DEBUG_COPY_ND_ARRAY dump_attrs(arr); @@ -922,7 +923,7 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp int i; npy_intp d; for (i=0; i<rank; ++i) { - d = PyArray_DIM(arr,i); + d = PyArray_DIM(arr,i); if (dims[i]>=0) { if (d > 1 && d!=dims[i]) { PyErr_Format(PyExc_ValueError, diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h index c9b54e259..5d0dcf676 100644 --- a/numpy/f2py/src/fortranobject.h +++ b/numpy/f2py/src/fortranobject.h @@ -78,16 +78,16 @@ typedef void *(*f2pycfunc)(void); typedef struct { char *name; /* attribute (array||routine) name */ int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ + || rank=-1 for Fortran routine */ struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ int type; /* PyArray_<type> || not used */ char *data; /* pointer to array || Fortran routine */ - f2py_init_func func; /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ char *doc; /* documentation string; only recommended - for routines. */ + for routines. */ } FortranDataDef; typedef struct { @@ -139,16 +139,16 @@ int F2PyCapsule_Check(PyObject *ptr); #define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) #define F2PY_GET_ALIGNMENT(intent) \ - (F2PY_ALIGN4(intent) ? 4 : \ - (F2PY_ALIGN8(intent) ? 8 : \ - (F2PY_ALIGN16(intent) ? 16 : 1) )) + (F2PY_ALIGN4(intent) ? 4 : \ + (F2PY_ALIGN8(intent) ? 8 : \ + (F2PY_ALIGN16(intent) ? 
16 : 1) )) #define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) extern PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj); + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj); extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); #ifdef DEBUG_COPY_ND_ARRAY diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index 22801abdc..7f46303b0 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -33,7 +33,7 @@ Required arguments:\n" "Return objects:\n" " arr : array"; static PyObject *f2py_rout_wrap_call(PyObject *capi_self, - PyObject *capi_args) { + PyObject *capi_args) { PyObject * volatile capi_buildvalue = NULL; int type_num = 0; npy_intp *dims = NULL; @@ -45,7 +45,7 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self, int i; if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ - &type_num,&dims_capi,&intent,&arr_capi)) + &type_num,&dims_capi,&intent,&arr_capi)) return NULL; rank = PySequence_Length(dims_capi); dims = malloc(rank*sizeof(npy_intp)); @@ -78,7 +78,7 @@ Required arguments:\n" " itemsize : int\n" ; static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, - PyObject *capi_args) { + PyObject *capi_args) { PyObject *arr_capi = Py_None; PyArrayObject *arr = NULL; PyObject *dimensions = NULL; @@ -87,7 +87,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, int i; memset(s,0,100*sizeof(char)); if (!PyArg_ParseTuple(capi_args,"O!|:wrap.attrs", - &PyArray_Type,&arr_capi)) + &PyArray_Type,&arr_capi)) return NULL; arr = (PyArrayObject *)arr_capi; sprintf(s,"%p",PyArray_DATA(arr)); @@ -98,15 +98,15 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i))); } return Py_BuildValue("siOOO(cciii)ii",s,PyArray_NDIM(arr), - dimensions,strides, - (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)), - PyArray_DESCR(arr)->kind, - PyArray_DESCR(arr)->type, - PyArray_TYPE(arr), - PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, - PyArray_FLAGS(arr), - PyArray_ITEMSIZE(arr)); + dimensions,strides, + (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)), + PyArray_DESCR(arr)->kind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyArray_DESCR(arr)->alignment, + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); } static PyMethodDef f2py_module_methods[] = { diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 000000000..4770c11c4 --- /dev/null +++ b/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,32 @@ +from __future__ import division, absolute_import, print_function + +from . 
import util + +from numpy.testing import run_module_suite, assert_equal, dec + +import sys + +class TestQuotedCharacter(util.F2PyTest): + code = """ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END + """ + + @dec.knownfailureif(sys.platform=='win32', msg='Fails with MinGW64 Gfortran (Issue #9673)') + def test_quoted_character(self): + assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')')) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 000000000..2b0f32727 --- /dev/null +++ b/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,53 @@ +from __future__ import division, absolute_import, print_function + +from . import util +from numpy.testing import assert_equal + +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = """ +python module {module} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module} + """.format(module=module_name) + + def test_multiline(self): + assert_equal(self.module.foo(), 42) + +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = """ +python module {module} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module} + """.format(module=module_name) + + def test_callstatement(self): + assert_equal(self.module.foo(), 42) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index e61122107..422a87322 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -632,7 +632,7 @@ def select(condlist, choicelist, default=0): deprecated_ints = True else: raise ValueError( - 'invalid entry in choicelist: should be boolean ndarray') + 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) if deprecated_ints: # 2014-02-24, 1.9 @@ -818,9 +818,9 @@ def gradient(f, *varargs, **kwargs): Notes ----- Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous - derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the - spacing the finite difference coefficients are computed by minimising - the consistency error :math:`\\eta_{i}`: + derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we + minimize the "consistency error" :math:`\\eta_{i}` between the true gradient + and its estimate from a linear combination of the neighboring grid-points: .. math:: diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index fbdc2edfb..3220f6534 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -137,6 +137,8 @@ class NDArrayOperatorsMixin(object): Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations with arbitrary, unrecognized types. This ensures that interactions with ArrayLike preserve a well-defined casting hierarchy. + + .. 
versionadded:: 1.13 """ # Like np.ndarray, this mixin class implements "Option 1" from the ufunc # overrides NEP. diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index f321d6a6f..bdde2e22d 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -153,9 +153,9 @@ lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args) lapack_lite_status = FNAME(dgeqrf)(&m, &n, DDATA(a), &lda, DDATA(tau), DDATA(work), &lwork, &info); - if (PyErr_Occurred()) { + if (PyErr_Occurred()) { return NULL; - } + } return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgeqrf_", lapack_lite_status,"m",m,"n",n,"lda",lda, @@ -179,9 +179,9 @@ lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args) lapack_lite_status = FNAME(dorgqr)(&m, &n, &k, DDATA(a), &lda, DDATA(tau), DDATA(work), &lwork, &info); - if (PyErr_Occurred()) { + if (PyErr_Occurred()) { return NULL; - } + } return Py_BuildValue("{s:i,s:i}","dorgqr_",lapack_lite_status, "info",info); @@ -249,9 +249,9 @@ lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args) lapack_lite_status = FNAME(zgeqrf)(&m, &n, ZDATA(a), &lda, ZDATA(tau), ZDATA(work), &lwork, &info); - if (PyErr_Occurred()) { + if (PyErr_Occurred()) { return NULL; - } + } return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info); } @@ -275,9 +275,9 @@ lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args) lapack_lite_status = FNAME(zungqr)(&m, &n, &k, ZDATA(a), &lda, ZDATA(tau), ZDATA(work), &lwork, &info); - if (PyErr_Occurred()) { + if (PyErr_Occurred()) { return NULL; - } + } return Py_BuildValue("{s:i,s:i}","zungqr_",lapack_lite_status, "info",info); diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src index 3c30982a7..0248518ac 100644 --- a/numpy/linalg/umath_linalg.c.src +++ b/numpy/linalg/umath_linalg.c.src @@ -1373,7 +1373,7 @@ init_@lapack_func@(EIGH_PARAMS_t *params, fortran_int lda = fortran_int_max(N, 1); mem_buff = malloc(safe_N * safe_N * sizeof(@typ@) + - safe_N * sizeof(@basetyp@)); + safe_N * sizeof(@basetyp@)); if (!mem_buff) { goto error; } diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 501c1e5b3..16d649c4a 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -4514,12 +4514,11 @@ cdef class RandomState: # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value # decomposition of cov is such an A. # - # Also check that cov is positive-semidefinite. If so, the u.T and v + # Also check that cov is symmetric positive-semidefinite. If so, the u.T and v # matrices should be equal up to roundoff error if cov is - # symmetrical and the singular value of the corresponding row is + # symmetric and the singular value of the corresponding row is # not zero. We continue to use the SVD rather than Cholesky in - # order to preserve current outputs. Note that symmetry has not - # been checked. + # order to preserve current outputs. 
         (u, s, v) = svd(cov)

@@ -4530,10 +4529,12 @@
         psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
         if not psd:
             if check_valid == 'warn':
-                warnings.warn("covariance is not positive-semidefinite.",
-                              RuntimeWarning)
+                warnings.warn(
+                    "covariance is not symmetric positive-semidefinite.",
+                    RuntimeWarning)
             else:
-                raise ValueError("covariance is not positive-semidefinite.")
+                raise ValueError(
+                    "covariance is not symmetric positive-semidefinite.")
 
         x = np.dot(x, np.sqrt(s)[:, None] * v)
         x += mean
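
A few of the changes above are easiest to see from the Python side; the sketches that follow are illustrative only and are not part of the patch. The arrayprint.py fix and the DuckCounter test lean on the existing formatter hook of np.array2string; a minimal standalone use of that hook (the spelled-out names are made up for the example):

    import numpy as np

    spelled = {0: 'zero', 1: 'one', 2: 'two'}
    # 'int' selects the formatter applied to integer elements.
    fmt = {'int': lambda x: spelled.get(int(x), 'many')}
    print(np.array2string(np.arange(5), formatter=fmt))   # [zero one two many many]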
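
The descriptor.c change and the 1.15.0 release note give Python 2 the same acceptance of unicode dtype field names that Python 3 already had, by encoding them with the ascii codec. A sketch of the new behaviour; on Python 3 both constructions succeed, so only a Python 2 run takes the except branch:

    import numpy as np

    # An ASCII-encodable unicode name is accepted; on Python 2 (with this
    # patch) it is stored as the bytestring b'b'.
    dt = np.dtype([(u'b', float)])
    print(dt.names)

    # A name the 'ascii' codec cannot encode is rejected on Python 2 with
    # UnicodeEncodeError; Python 3 accepts it unchanged.
    try:
        np.dtype([(u'\uc3bc', int)])
    except UnicodeEncodeError as exc:
        print('rejected:', exc)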
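
The crackfortran.py rewrite hangs on the new split_by_unquoted() helper, which finds the first '!' or ';' (or any requested delimiter) that is not inside a quoted Fortran string. Below is a standalone copy of the helper plus a small demonstration, so the splitting rule can be tried outside f2py:

    import re

    def split_by_unquoted(line, characters):
        """Split line into (before, after) at the first occurrence of one of
        `characters` that is not within quotes; after is '' if none occurs."""
        assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
        r = re.compile(
            r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
            r"(?P<after>{char}.*)\Z".format(
                not_quoted="[^\"'{}]".format(re.escape(characters)),
                char="[{}]".format(re.escape(characters)),
                single_quoted=r"('([^'\\]|(\\.))*')",
                double_quoted=r'("([^"\\]|(\\.))*")'))
        m = r.match(line)
        if m:
            d = m.groupdict()
            return (d["before"], d["after"])
        return (line, "")

    # A '!' inside a character literal is not a comment marker:
    print(split_by_unquoted("ch = '!'  ! real comment", '!'))
    # -> ("ch = '!'  ", '! real comment')

    # Likewise a quoted ';' does not split statements:
    print(split_by_unquoted("ch = ';'; i = 1", ';'))
    # -> ("ch = ';'", '; i = 1')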
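
The mtrand.pyx hunk only rewords the diagnostics; check_valid and tol are pre-existing keyword arguments of multivariate_normal. A quick way to exercise both the warning and the ValueError path with a covariance that is symmetric but not positive-semidefinite:

    import warnings
    import numpy as np

    mean = [0.0, 0.0]
    bad_cov = [[1.0, 2.0],
               [2.0, 1.0]]        # symmetric; eigenvalues 3 and -1, so not PSD

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        np.random.multivariate_normal(mean, bad_cov, check_valid='warn')
    print(caught[0].message)       # RuntimeWarning with the reworded message

    try:
        np.random.multivariate_normal(mean, bad_cov, check_valid='raise')
    except ValueError as exc:
        print('rejected:', exc)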
