diff options
Diffstat (limited to 'numpy/core')
99 files changed, 4831 insertions, 2460 deletions
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 004c2762b..8c6596d13 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -110,6 +110,10 @@ def _array_descr(descriptor): num = field[1] - offset result.append(('', '|V%d' % num)) offset += num + elif field[1] < offset: + raise ValueError( + "dtype.descr is not defined for types with overlapping or " + "out-of-order fields") if len(field) > 3: name = (field[2], field[3]) else: diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index c05316d18..0f928676b 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -142,3 +142,10 @@ def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): ret = um.sqrt(ret) return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 2706d16f0..cbe95f51b 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -6,8 +6,8 @@ $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ from __future__ import division, absolute_import, print_function __all__ = ["array2string", "array_str", "array_repr", "set_string_function", - "set_printoptions", "get_printoptions", "format_float_positional", - "format_float_scientific"] + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] __docformat__ = 'restructuredtext' # @@ -39,22 +39,17 @@ else: import numpy as np from . import numerictypes as _nt -from .umath import absolute, not_equal, isnan, isinf +from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat from . 
import multiarray from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, dtype, ndarray) + datetime_as_string, datetime_data, dtype, ndarray, + set_legacy_print_mode) from .fromnumeric import ravel, any from .numeric import concatenate, asarray, errstate from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, flexible) import warnings - -if sys.version_info[0] >= 3: - _MAXINT = sys.maxsize - _MININT = -sys.maxsize - 1 -else: - _MAXINT = sys.maxint - _MININT = -sys.maxint - 1 +import contextlib _format_options = { 'edgeitems': 3, # repr N leading and trailing items of each dimension @@ -66,11 +61,12 @@ _format_options = { 'nanstr': 'nan', 'infstr': 'inf', 'sign': '-', - 'formatter': None } + 'formatter': None, + 'legacy': False} def _make_options_dict(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, - sign=None, formatter=None, floatmode=None): + sign=None, formatter=None, floatmode=None, legacy=None): """ make a dictionary out of the non-None arguments, plus sanity checks """ options = {k: v for k, v in locals().items() if v is not None} @@ -78,20 +74,23 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if suppress is not None: options['suppress'] = bool(suppress) - if sign not in [None, '-', '+', ' ', 'legacy']: - raise ValueError("sign option must be one of " - "' ', '+', '-', or 'legacy'") - modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] if floatmode not in modes + [None]: raise ValueError("floatmode option must be one of " + ", ".join('"{}"'.format(m) for m in modes)) + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy not in [None, False, '1.13']: + warnings.warn("legacy printing option can currently only be '1.13' or " + "`False`", stacklevel=3) + return options def set_printoptions(precision=None, threshold=None, edgeitems=None, 
linewidth=None, suppress=None, nanstr=None, infstr=None, - formatter=None, sign=None, floatmode=None): + formatter=None, sign=None, floatmode=None, **kwarg): """ Set printing options. @@ -100,8 +99,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, Parameters ---------- - precision : int, optional + precision : int or None, optional Number of digits of precision for floating point output (default 8). + May be `None` if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. threshold : int, optional Total number of array elements which trigger summarization rather than full repr (default 1000). @@ -121,12 +122,11 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, String representation of floating point not-a-number (default nan). infstr : str, optional String representation of floating point infinity (default inf). - sign : string, either '-', '+', ' ' or 'legacy', optional + sign : string, either '-', '+', or ' ', optional Controls printing of the sign of floating-point types. If '+', always print the sign of positive values. If ' ', always prints a space (whitespace character) in the sign position of positive values. If - '-', omit the sign character of positive values. If 'legacy', print a - space for positive values except in 0d arrays. (default '-') + '-', omit the sign character of positive values. (default '-') formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. @@ -170,6 +170,14 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, but if every element in the array can be uniquely represented with an equal number of fewer digits, use that many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. 
This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 See Also -------- @@ -219,13 +227,26 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, ... linewidth=75, nanstr='nan', precision=8, ... suppress=False, threshold=1000, formatter=None) """ + legacy = kwarg.pop('legacy', None) + if kwarg: + msg = "set_printoptions() got unexpected keyword argument '{}'" + raise TypeError(msg.format(kwarg.popitem()[0])) + opt = _make_options_dict(precision, threshold, edgeitems, linewidth, suppress, nanstr, infstr, sign, formatter, - floatmode) + floatmode, legacy) # formatter is always reset opt['formatter'] = formatter _format_options.update(opt) + # set the C variable for legacy mode + if _format_options['legacy'] == '1.13': + set_legacy_print_mode(113) + # reset the sign option in legacy mode to avoid confusion + _format_options['sign'] = '-' + elif _format_options['legacy'] is False: + set_legacy_print_mode(0) + def get_printoptions(): """ @@ -255,22 +276,59 @@ def get_printoptions(): """ return _format_options.copy() -def _leading_trailing(a): - edgeitems = _format_options['edgeitems'] - if a.ndim == 1: - if len(a) > 2*edgeitems: - b = concatenate((a[:edgeitems], a[-edgeitems:])) - else: - b = a + +@contextlib.contextmanager +def printoptions(*args, **kwargs): + """Context manager for setting print options. + + Set print options for the scope of the `with` block, and restore the old + options at the end. See `set_printoptions` for the full description of + available options. + + Examples + -------- + + >>> with np.printoptions(precision=2): + ... print(np.array([2.0])) / 3 + [0.67] + + The `as`-clause of the `with`-statement gives the current print options: + + >>> with np.printoptions(precision=2) as opts: + ... 
assert_equal(opts, np.get_printoptions()) + + See Also + -------- + set_printoptions, get_printoptions + + """ + opts = np.get_printoptions() + try: + np.set_printoptions(*args, **kwargs) + yield np.get_printoptions() + finally: + np.set_printoptions(**opts) + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. + """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2*edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) else: - if len(a) > 2*edgeitems: - l = [_leading_trailing(a[i]) for i in range(min(len(a), edgeitems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), edgeitems), 0, -1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = concatenate(tuple(l)) - return b + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + def _object_format(o): """ Object arrays containing lists should be printed unambiguously """ @@ -283,21 +341,30 @@ def _object_format(o): def repr_format(x): return repr(x) +def str_format(x): + return str(x) + def _get_formatdict(data, **opt): prec, fmode = opt['precision'], opt['floatmode'] supp, sign = opt['suppress'], opt['sign'] + legacy = opt['legacy'] # wrapped in lambdas to avoid taking a code path with the wrong type of data formatdict = { 'bool': lambda: BoolFormat(data), 'int': lambda: IntegerFormat(data), 'float': lambda: - FloatingFormat(data, prec, fmode, supp, sign), + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longfloat': lambda: + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), 'complexfloat': lambda: - ComplexFloatingFormat(data, prec, fmode, supp, sign), - 'datetime': lambda: DatetimeFormat(data), 
+ ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longcomplexfloat': lambda: + ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), 'timedelta': lambda: TimedeltaFormat(data), 'object': lambda: _object_format, + 'void': lambda: str_format, 'numpystr': lambda: repr_format, 'str': lambda: str} @@ -316,7 +383,7 @@ def _get_formatdict(data, **opt): for key in ['int']: formatdict[key] = indirect(formatter['int_kind']) if 'float_kind' in fkeys: - for key in ['half', 'float', 'longfloat']: + for key in ['float', 'longfloat']: formatdict[key] = indirect(formatter['float_kind']) if 'complex_kind' in fkeys: for key in ['complexfloat', 'longcomplexfloat']: @@ -335,9 +402,6 @@ def _get_format_function(data, **options): find the right formatting function for the dtype_ """ dtype_ = data.dtype - if dtype_.fields is not None: - return StructureFormat.from_data(data, **options) - dtypeobj = dtype_.type formatdict = _get_formatdict(data, **options) if issubclass(dtypeobj, _nt.bool_): @@ -348,15 +412,26 @@ def _get_format_function(data, **options): else: return formatdict['int']() elif issubclass(dtypeobj, _nt.floating): - return formatdict['float']() + if issubclass(dtypeobj, _nt.longfloat): + return formatdict['longfloat']() + else: + return formatdict['float']() elif issubclass(dtypeobj, _nt.complexfloating): - return formatdict['complexfloat']() + if issubclass(dtypeobj, _nt.clongfloat): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): return formatdict['numpystr']() elif issubclass(dtypeobj, _nt.datetime64): return formatdict['datetime']() elif issubclass(dtypeobj, _nt.object_): return formatdict['object']() + elif issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() else: return 
formatdict['numpystr']() @@ -393,12 +468,17 @@ def _recursive_guard(fillvalue='...'): # gracefully handle recursive calls, when object arrays contain themselves @_recursive_guard() def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + a = asarray(a) + if a.size > options['threshold']: summary_insert = "..." - data = _leading_trailing(a) + data = _leading_trailing(a, options['edgeitems']) else: summary_insert = "" - data = asarray(a) + data = a # find the right formatting function for the array format_function = _get_format_function(data, **options) @@ -408,27 +488,28 @@ def _array2string(a, options, separator=' ', prefix=""): # skip over array( next_line_prefix += " "*len(prefix) - lst = _formatArray(a, format_function, a.ndim, options['linewidth'], - next_line_prefix, separator, - options['edgeitems'], summary_insert)[:-1] + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) return lst def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", style=np._NoValue, formatter=None, threshold=None, - edgeitems=None, sign=None, floatmode=None): + edgeitems=None, sign=None, floatmode=None, suffix="", + **kwarg): """ Return a string representation of an array. Parameters ---------- - a : ndarray + a : array_like Input array. max_line_width : int, optional The maximum number of columns the string should span. Newline characters splits the string appropriately after array elements. - precision : int, optional + precision : int or None, optional Floating point precision. Default is the current printing precision (usually 8), which can be altered using `set_printoptions`. 
suppress_small : bool, optional @@ -437,12 +518,14 @@ def array2string(a, max_line_width=None, precision=None, separator : str, optional Inserted between elements. prefix : str, optional - An array is typically printed as:: + suffix: str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: - 'prefix(' + array2string(a) + ')' + prefix + array2string(a) + suffix - The length of the prefix string is used to align the - output correctly. + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. style : _NoValue, optional Has no effect, do not use. @@ -462,6 +545,7 @@ def array2string(a, max_line_width=None, precision=None, - 'longfloat' : 128-bit floats - 'complexfloat' - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - 'str' : all other strings @@ -478,12 +562,11 @@ def array2string(a, max_line_width=None, precision=None, edgeitems : int, optional Number of array items in summary at beginning and end of each dimension. - sign : string, either '-', '+', ' ' or 'legacy', optional + sign : string, either '-', '+', or ' ', optional Controls printing of the sign of floating-point types. If '+', always print the sign of positive values. If ' ', always prints a space (whitespace character) in the sign position of positive values. If - '-', omit the sign character of positive values. If 'legacy', print a - space for positive values except in 0d arrays. + '-', omit the sign character of positive values. floatmode : str, optional Controls the interpretation of the `precision` option for floating-point types. 
Can take the following values: @@ -501,6 +584,14 @@ def array2string(a, max_line_width=None, precision=None, but if every element in the array can be uniquely represented with an equal number of fewer digits, use that many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 Returns ------- @@ -541,35 +632,52 @@ def array2string(a, max_line_width=None, precision=None, '[0x0L 0x1L 0x2L]' """ - # Deprecation 05-16-2017 v1.14 - if style is not np._NoValue: - warnings.warn("'style' argument is deprecated and no longer functional", - DeprecationWarning, stacklevel=3) + legacy = kwarg.pop('legacy', None) + if kwarg: + msg = "array2string() got unexpected keyword argument '{}'" + raise TypeError(msg.format(kwarg.popitem()[0])) overrides = _make_options_dict(precision, threshold, edgeitems, max_line_width, suppress_small, None, None, - sign, formatter, floatmode) + sign, formatter, floatmode, legacy) options = _format_options.copy() options.update(overrides) + if options['legacy'] == '1.13': + if a.shape == () and not a.dtype.names: + return style(a.item()) + elif style is not np._NoValue: + # Deprecation 11-9-2017 v1.14 + warnings.warn("'style' argument is deprecated and no longer functional" + " except in 1.13 'legacy' mode", + DeprecationWarning, stacklevel=3) + + if options['legacy'] != '1.13': + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 if a.size == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, options, separator, prefix) - return lst + return "[]" + + return _array2string(a, options, separator, 
prefix) -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy != '1.13': + s# don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False + + if needs_wrap: s += line.rstrip() + "\n" line = next_line_prefix line += word return s, line -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): """formatArray is designed for two modes of operation: 1. Full output @@ -577,86 +685,144 @@ def _formatArray(a, format_function, rank, max_line_len, 2. Summarized output """ - if rank == 0: - return format_function(a[()]) + '\n' + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. 
Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis - if summary_insert and 2*edge_items < len(a): - leading_items = edge_items - trailing_items = edge_items - summary_insert1 = summary_insert + separator - else: - leading_items = 0 - trailing_items = len(a) - summary_insert1 = "" + if axes_left == 0: + return format_function(a[index]) - if rank == 1: - s = "" - line = next_line_prefix - for i in range(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy == '1.13': + next_width = curr_width + else: + next_width = curr_width - len(']') - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) + a_len = a.shape[axis] + show_summary = summary_insert and 2*edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len - for i in range(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) + # stringify the array with the hanging indent on the first line too + s = '' - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in range(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in range(trailing_items, 1, -1): - if 
leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy == '1.13': + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max(len(separator.rstrip()), len(']')) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy) + if legacy == '1.13': + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy == '1.13': + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n'*(axes_left - 1) + + for i in range(leading_items): + nested = recurser(index + (i,), next_hanging_indent, next_width) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy == '1.13': + # trailing space, fixed nbr of newlines, 
and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError("{} must be >= 0".format(name)) + return x class FloatingFormat(object): """ Formatter for subtypes of np.floating """ - - def __init__(self, data, precision, floatmode, suppress_small, sign=False): + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + **kwarg): # for backcompatibility, accept bools if isinstance(sign, bool): sign = '+' if sign else '-' - self._legacy = False - if sign == 'legacy': - self._legacy = True - sign = '-' if data.shape == () else ' ' + self._legacy = kwarg.get('legacy', False) + if self._legacy == '1.13': + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' self.floatmode = floatmode if floatmode == 'unique': - self.precision = -1 + self.precision = None else: - if precision < 0: - raise ValueError( - "precision must be >= 0 in {} mode".format(floatmode)) self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + self.suppress_small = suppress_small self.sign = sign self.exp_format = 
False @@ -665,40 +831,33 @@ class FloatingFormat(object): self.fillFormat(data) def fillFormat(self, data): - with errstate(all='ignore'): - hasinf = isinf(data) - special = isnan(data) | hasinf - valid = not_equal(data, 0) & ~special - non_zero = data[valid] - abs_non_zero = absolute(non_zero) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. - min_val_sgn = 0. - else: - max_val = np.max(abs_non_zero) - min_val = np.min(abs_non_zero) - min_val_sgn = np.min(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + with errstate(over='ignore'): # division can overflow + if max_val >= 1.e8 or (not self.suppress_small and + (min_val < 0.0001 or max_val/min_val > 1000.)): self.exp_format = True - if len(non_zero) == 0: + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: self.pad_left = 0 self.pad_right = 0 self.trim = '.' 
self.exp_size = -1 self.unique = True elif self.exp_format: - # first pass printing to determine sizes trim, unique = '.', True - if self.floatmode == 'fixed' or self._legacy: + if self.floatmode == 'fixed' or self._legacy == '1.13': trim, unique = 'k', False strs = (dragon4_scientific(x, precision=self.precision, unique=unique, trim=trim, sign=self.sign == '+') - for x in non_zero) + for x in finite_vals) frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) self.exp_size = max(len(s) for s in exp_strs) - 1 @@ -706,13 +865,13 @@ class FloatingFormat(object): self.trim = 'k' self.precision = max(len(s) for s in frac_part) - # for back-compatibility with np 1.13, use two spaces and full prec - if self._legacy: - self.pad_left = 2 + (not (all(non_zero > 0) and self.sign == ' ')) + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy == '1.13': + self.pad_left = 3 else: - # this should be only 1 or two. Can be calculated from sign. + # this should be only 1 or 2. Can be calculated from sign. 
self.pad_left = max(len(s) for s in int_part) - # pad_right is not used to print, but needed for nan length calculation + # pad_right is only needed for nan length calculation self.pad_right = self.exp_size + 2 + self.precision self.unique = False @@ -722,11 +881,15 @@ class FloatingFormat(object): if self.floatmode == 'fixed': trim, unique = 'k', False strs = (dragon4_positional(x, precision=self.precision, + fractional=True, unique=unique, trim=trim, sign=self.sign == '+') - for x in non_zero) + for x in finite_vals) int_part, frac_part = zip(*(s.split('.') for s in strs)) - self.pad_left = max(len(s) for s in int_part) + if self._legacy == '1.13': + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) self.pad_right = max(len(s) for s in frac_part) self.exp_size = -1 @@ -738,12 +901,14 @@ class FloatingFormat(object): self.unique = True self.trim = '.' - # account for sign = ' ' by adding one to pad_left - if len(non_zero) > 0 and all(non_zero > 0) and self.sign == ' ': - self.pad_left += 1 + if self._legacy != '1.13': + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 - if any(special): - neginf = self.sign != '-' or any(data[hasinf] < 0) + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) nanlen = len(_format_options['nanstr']) inflen = len(_format_options['infstr']) + neginf offset = self.pad_right + 1 # +1 for decimal pt @@ -761,23 +926,41 @@ class FloatingFormat(object): return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret if self.exp_format: - return dragon4_scientific(x, precision=self.precision, + return dragon4_scientific(x, + precision=self.precision, unique=self.unique, - trim=self.trim, sign=self.sign == '+', + trim=self.trim, + sign=self.sign == '+', pad_left=self.pad_left, 
exp_digits=self.exp_size) else: - return dragon4_positional(x, precision=self.precision, + return dragon4_positional(x, + precision=self.precision, unique=self.unique, - trim=self.trim, sign=self.sign == '+', + fractional=True, + trim=self.trim, + sign=self.sign == '+', pad_left=self.pad_left, pad_right=self.pad_right) +# for back-compatibility, we keep the classes for each float type too +class FloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("FloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(FloatFormat, self).__init__(*args, **kwargs) + + +class LongFloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("LongFloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongFloatFormat, self).__init__(*args, **kwargs) def format_float_scientific(x, precision=None, unique=True, trim='k', sign=False, pad_left=None, exp_digits=None): """ - Format a floating-point scalar as a string in fractional notation. + Format a floating-point scalar as a decimal string in scientific notation. Provides control over rounding, trimming and padding. Uses and assumes IEEE unbiased rounding. Uses the "Dragon4" algorithm. @@ -786,19 +969,18 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', ---------- x : python float or numpy floating scalar Value to format. - precision : non-negative integer, optional - Maximum number of fractional digits to print. May be ommited - if `unique` is `True`, but is required if unique is `False`. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. unique : boolean, optional - If `False`, output exactly `precision` fractional digits and round the - remaining value. Digits are generated as if printing an - infinite-precision value and stopping after `precision` digits. 
If `True`, use a digit-generation strategy which gives the shortest representation which uniquely identifies the floating-point number from other values of the same type, by judicious rounding. If `precision` - was omitted, print out the full unique representation, otherwise digit - generation is cut off after `precision` digits and the remaining value - is rounded. + was omitted, print all necessary digits, otherwise digit generation is + cut off after `precision` digits and the remaining value is rounded. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value. trim : one of 'k', '.', '0', '-', optional Controls post-processing trimming of trailing digits, as follows: k : keep trailing zeros, keep decimal point (no trimming) @@ -834,38 +1016,43 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', >>> np.format_float_scientific(s, exp_digits=4) '1.23e+0024' """ - precision = -1 if precision is None else precision - pad_left = -1 if pad_left is None else pad_left - exp_digits = -1 if exp_digits is None else exp_digits - return dragon4_scientific(x, precision=precision, unique=unique, trim=trim, - sign=sign, pad_left=pad_left, + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, exp_digits=exp_digits) -def format_float_positional(x, precision=None, unique=True, trim='k', - sign=False, pad_left=None, pad_right=None): +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None): """ - Format a floating-point scalar as a string in scientific notation. + Format a floating-point scalar as a decimal string in positional notation. 
- Provides control over rounding, trimming and padding. Uses and assumes + Provides control over rounding, trimming and padding. Uses and assumes IEEE unbiased rounding. Uses the "Dragon4" algorithm. Parameters ---------- x : python float or numpy floating scalar Value to format. - precision : non-negative integer, optional - Maximum number of fractional digits to print. May be ommited - if `unique` is `True`, but is required if unique is `False`. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. unique : boolean, optional - If `False`, output exactly `precision` fractional digits and round the - remaining value. Digits are generated as if printing an - infinite-precision value and stopping after `precision` digits. If `True`, use a digit-generation strategy which gives the shortest representation which uniquely identifies the floating-point number from other values of the same type, by judicious rounding. If `precision` - was omitted, print out the full unique representation, otherwise digit - generation is cut off after `precision` digits and the remaining value - is rounded. + was omitted, print out all necessary digits, otherwise digit generation + is cut off after `precision` digits and the remaining value is rounded. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value. + fractional : boolean, optional + If `True`, the cutoff of `precision` digits refers to the total number + of digits after the decimal point, including leading zeros. + If `False`, `precision` refers to the total number of significant + digits, before or after the decimal point, ignoring leading zeros. 
trim : one of 'k', '.', '0', '-', optional Controls post-processing trimming of trailing digits, as follows: k : keep trailing zeros, keep decimal point (no trimming) @@ -902,33 +1089,26 @@ def format_float_positional(x, precision=None, unique=True, trim='k', >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) '0.3000488281' """ - precision = -1 if precision is None else precision - pad_left = -1 if pad_left is None else pad_left - pad_right = -1 if pad_right is None else pad_right - return dragon4_positional(x, precision=precision, unique=unique, trim=trim, + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, sign=sign, pad_left=pad_left, pad_right=pad_right) class IntegerFormat(object): def __init__(self, data): - try: + if data.size > 0: max_str_len = max(len(str(np.max(data))), len(str(np.min(data)))) - self.format = '%' + str(max_str_len) + 'd' - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. 
- pass - except ValueError: - # this occurs when everything is NA - pass + else: + max_str_len = 0 + self.format = '%{}d'.format(max_str_len) def __call__(self, x): - if _MININT < x < _MAXINT: - return self.format % x - else: - return "%s" % x + return self.format % x class BoolFormat(object): @@ -943,24 +1123,77 @@ class BoolFormat(object): class ComplexFloatingFormat(object): """ Formatter for subtypes of np.complexfloating """ - - def __init__(self, x, precision, floatmode, suppress_small, sign=False): + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, **kwarg): # for backcompatibility, accept bools if isinstance(sign, bool): sign = '+' if sign else '-' - self.real_format = FloatingFormat(x.real, precision, floatmode, - suppress_small, sign=sign) - self.imag_format = FloatingFormat(x.imag, precision, floatmode, - suppress_small, sign='+') + floatmode_real = floatmode_imag = floatmode + if kwarg.get('legacy', False) == '1.13': + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat(x.real, precision, floatmode_real, + suppress_small, sign=sign, **kwarg) + self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag, + suppress_small, sign='+', **kwarg) def __call__(self, x): r = self.real_format(x.real) i = self.imag_format(x.imag) - return r + i + 'j' -class DatetimeFormat(object): - def __init__(self, x, unit=None, timezone=None, casting='same_kind'): + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + +# for back-compatibility, we keep the classes for each complex type too +class ComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "ComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(ComplexFormat, self).__init__(*args, **kwargs) + +class LongComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + 
warnings.warn( + "LongComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongComplexFormat, self).__init__(*args, **kwargs) + + +class _TimelikeFormat(object): + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = '%{}s'.format(max_str_len) + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + + +class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): # Get the unit from the dtype if unit is None: if x.dtype.kind == 'M': @@ -973,38 +1206,26 @@ class DatetimeFormat(object): self.timezone = timezone self.unit = unit self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super(DatetimeFormat, self).__init__(x) def __call__(self, x): + if self.legacy == '1.13': + return self._format_non_nat(x) + return super(DatetimeFormat, self).__call__(x) + + def _format_non_nat(self, x): return "'%s'" % datetime_as_string(x, unit=self.unit, timezone=self.timezone, casting=self.casting) -class TimedeltaFormat(object): - def __init__(self, data): - nat_value = array(['NaT'], dtype=data.dtype)[0] - int_dtype = dtype(data.dtype.byteorder + 'i8') - int_view = data.view(int_dtype) - v = int_view[not_equal(int_view, nat_value.view(int_dtype))] - if len(v) > 0: - # Max str length of non-NaT elements - max_str_len = max(len(str(np.max(v))), - len(str(np.min(v)))) - else: - max_str_len = 0 - if len(v) < len(data): - # data contains 
a NaT - max_str_len = max(max_str_len, 5) - self.format = '%' + str(max_str_len) + 'd' - self._nat = "'NaT'".rjust(max_str_len) - - def __call__(self, x): - # TODO: After NAT == NAT deprecation should be simplified: - if (x + 1).view('i8') == x.view('i8'): - return self._nat - else: - return self.format % x.astype('i8') +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) class SubArrayFormat(object): @@ -1017,15 +1238,21 @@ class SubArrayFormat(object): return "[" + ", ".join(self.__call__(a) for a in arr) + "]" -class StructureFormat(object): +class StructuredVoidFormat(object): + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), + as alias scalars lose their field information, and the implementation + relies upon np.void.__getitem__. + """ def __init__(self, format_functions): self.format_functions = format_functions - self.num_fields = len(format_functions) @classmethod def from_data(cls, data, **options): """ - This is a second way to initialize StructureFormat, using the raw data + This is a second way to initialize StructuredVoidFormat, using the raw data as input. Added to avoid changing the signature of __init__. 
""" format_functions = [] @@ -1037,10 +1264,24 @@ class StructureFormat(object): return cls(format_functions) def __call__(self, x): - s = "(" - for field, format_function in zip(x, self.format_functions): - s += format_function(field) + ", " - return (s[:-2] if 1 < self.num_fields else s[:-1]) + ")" + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return "({},)".format(str_fields[0]) + else: + return "({})".format(", ".join(str_fields)) + + +# for backwards compatibility +class StructureFormat(StructuredVoidFormat): + def __init__(self, *args, **kwargs): + # NumPy 1.14, 2018-02-14 + warnings.warn( + "StructureFormat has been replaced by StructuredVoidFormat", + DeprecationWarning, stacklevel=2) + super(StructureFormat, self).__init__(*args, **kwargs) def _void_scalar_repr(x): @@ -1049,15 +1290,76 @@ def _void_scalar_repr(x): scalartypes.c.src code, and is placed here because it uses the elementwise formatters defined above. """ - return StructureFormat.from_data(array(x), **_format_options)(x) + return StructuredVoidFormat.from_data(array(x), **_format_options)(x) -_typelessdata = [int_, float_, complex_] +_typelessdata = [int_, float_, complex_, bool_] if issubclass(intc, int): _typelessdata.append(intc) if issubclass(longlong, int): _typelessdata.append(longlong) + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. 
+ + Examples + -------- + >>> np.core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np.core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=np.int8) + """ + dtype = np.dtype(dtype) + if _format_options['legacy'] == '1.13' and dtype.type == bool_: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if dtype.names is not None: + # structured dtypes give a list or tuple repr + return str(dtype) + elif issubclass(dtype.type, flexible): + # handle these separately so they don't give garbage like str256 + return "'%s'" % str(dtype) + + typename = dtype.name + # quote typenames which can't be represented as python variable names + if typename and not (typename[0].isalpha() and typename.isalnum()): + typename = repr(typename) + + return typename + + def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): """ Return the string representation of an array. @@ -1100,35 +1402,49 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): 'array([ 0.000001, 0. , 2. , 3. 
])' """ + if max_line_width is None: + max_line_width = _format_options['linewidth'] + if type(arr) is not ndarray: class_name = type(arr).__name__ else: class_name = "array" - if arr.size > 0 or arr.shape == (0,): + skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 + + prefix = class_name + "(" + suffix = ")" if skipdtype else "," + + if (_format_options['legacy'] == '1.13' and + arr.shape == () and not arr.dtype.names): + lst = repr(arr.item()) + elif arr.size > 0 or arr.shape == (0,): lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', class_name + "(") + ', ', prefix, suffix=suffix) else: # show zero-length shape unless it is (0,) lst = "[], shape=%s" % (repr(arr.shape),) - skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0 + arr_str = prefix + lst + suffix if skipdtype: - return "%s(%s)" % (class_name, lst) - else: - typename = arr.dtype.name - # Quote typename in the output if it is "complex". - if typename and not (typename[0].isalpha() and typename.isalnum()): - typename = "'%s'" % typename + return arr_str + + dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) - lf = ' ' + # compute whether we should put dtype on a new line: Do so if adding the + # dtype would extend the last line past max_line_width. + # Note: This line gives the correct result even when rfind returns -1. 
+ last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) + spacer = " " + if _format_options['legacy'] == '1.13': if issubclass(arr.dtype.type, flexible): - if arr.dtype.names: - typename = "%s" % str(arr.dtype) - else: - typename = "'%s'" % str(arr.dtype) - lf = '\n'+' '*len(class_name + "(") - return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename) + spacer = '\n' + ' '*len(class_name + "(") + elif last_line_len + len(dtype_str) + 1 > max_line_width: + spacer = '\n' + ' '*len(class_name + "(") + + return arr_str + spacer + dtype_str + +_guarded_str = _recursive_guard()(str) def array_str(a, max_line_width=None, precision=None, suppress_small=None): """ @@ -1164,6 +1480,19 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): '[0 1 2]' """ + if (_format_options['legacy'] == '1.13' and + a.shape == () and not a.dtype.names): + return str(a.item()) + + # the str of 0d arrays is a special case: It should appear like a scalar, + # so floats are not truncated by `precision`, and strings are not wrapped + # in quotes. So we return the str of the scalar value. + if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. 
+ return _guarded_str(np.ndarray.__getitem__(a, ())) + return array2string(a, max_line_width, precision, suppress_small, ' ', "") def set_string_function(f, repr=True): diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index 6e6547129..68ac5109c 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -36,5 +36,8 @@ 0x0000000a = 9b8bce614655d3eb02acddcb508203cb # Version 11 (NumPy 1.13) Added PyArray_MapIterArrayCopyIfOverlap -# Version 11 (NumPy 1.14) No Change 0x0000000b = edb1ba83730c650fd9bc5772a919cda7 + +# Version 12 (NumPy 1.14) Added PyArray_ResolveWritebackIfCopy, +# PyArray_SetWritebackIfCopyBase and deprecated PyArray_SetUpdateIfCopyBase. +0x0000000c = a1bc756c5782853ec2e3616cf66869d8 diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index af058b4be..ebcf864ea 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -84,8 +84,9 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None): if f is not None: if isinstance(f, str): func_data = build_func_data(types, f) + elif len(f) != len(types): + raise ValueError("Number of types and f do not match") else: - assert len(f) == len(types) func_data = f else: func_data = (None,) * len(types) @@ -93,10 +94,14 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None): in_ = (in_,) * len(types) elif in_ is None: in_ = (None,) * len(types) + elif len(in_) != len(types): + raise ValueError("Number of types and inputs do not match") if isinstance(out, str): out = (out,) * len(types) elif out is None: out = (None,) * len(types) + elif len(out) != len(types): + raise ValueError("Number of types and outputs do not match") tds = [] for t, fd, i, o in zip(types, func_data, in_, out): # [(simd-name, list of types)] @@ -789,7 +794,7 @@ defdict = { docstrings.get('numpy.core.umath.divmod'), None, 
TD(intflt), - TD(O, f='PyNumber_Divmod'), + # TD(O, f='PyNumber_Divmod'), # gh-9730 ), 'hypot': Ufunc(2, 1, Zero, @@ -875,6 +880,20 @@ defdict = { TypeDescription('d', None, 'd', 'di'), TypeDescription('g', None, 'g', 'gi'), ], + ), +'gcd' : + Ufunc(2, 1, Zero, + docstrings.get('numpy.core.umath.gcd'), + "PyUFunc_SimpleBinaryOperationTypeResolver", + TD(ints), + TD('O', f='npy_ObjectGCD'), + ), +'lcm' : + Ufunc(2, 1, None, + docstrings.get('numpy.core.umath.lcm'), + "PyUFunc_SimpleBinaryOperationTypeResolver", + TD(ints), + TD('O', f='npy_ObjectLCM'), ) } @@ -928,16 +947,42 @@ def make_arrays(funcdict): k = 0 sub = 0 - if uf.nin > 1: - assert uf.nin == 2 - thedict = chartotype2 # two inputs and one output - else: - thedict = chartotype1 # one input and one output - for t in uf.type_descriptions: - if (t.func_data not in (None, FullTypeDescr) and - not isinstance(t.func_data, FuncNameSuffix)): + if t.func_data is FullTypeDescr: + tname = english_upper(chartoname[t.type]) + datalist.append('(void *)NULL') + funclist.append( + '%s_%s_%s_%s' % (tname, t.in_, t.out, name)) + elif isinstance(t.func_data, FuncNameSuffix): + datalist.append('(void *)NULL') + tname = english_upper(chartoname[t.type]) + funclist.append( + '%s_%s_%s' % (tname, name, t.func_data.suffix)) + elif t.func_data is None: + datalist.append('(void *)NULL') + tname = english_upper(chartoname[t.type]) + funclist.append('%s_%s' % (tname, name)) + if t.simd is not None: + for vt in t.simd: + code2list.append(textwrap.dedent("""\ + #ifdef HAVE_ATTRIBUTE_TARGET_{ISA} + if (NPY_CPU_SUPPORTS_{ISA}) {{ + {fname}_functions[{idx}] = {type}_{fname}_{isa}; + }} + #endif + """).format( + ISA=vt.upper(), isa=vt, + fname=name, type=tname, idx=k + )) + else: funclist.append('NULL') + if (uf.nin, uf.nout) == (2, 1): + thedict = chartotype2 + elif (uf.nin, uf.nout) == (1, 1): + thedict = chartotype1 + else: + raise ValueError("Could not handle {}[{}]".format(name, t.type)) + astype = '' if not t.astype is None: astype = 
'_As_%s' % thedict[t.astype] @@ -958,29 +1003,6 @@ def make_arrays(funcdict): datalist.append('(void *)NULL') #datalist.append('(void *)%s' % t.func_data) sub += 1 - elif t.func_data is FullTypeDescr: - tname = english_upper(chartoname[t.type]) - datalist.append('(void *)NULL') - funclist.append( - '%s_%s_%s_%s' % (tname, t.in_, t.out, name)) - elif isinstance(t.func_data, FuncNameSuffix): - datalist.append('(void *)NULL') - tname = english_upper(chartoname[t.type]) - funclist.append( - '%s_%s_%s' % (tname, name, t.func_data.suffix)) - else: - datalist.append('(void *)NULL') - tname = english_upper(chartoname[t.type]) - funclist.append('%s_%s' % (tname, name)) - if t.simd is not None: - for vt in t.simd: - code2list.append("""\ -#ifdef HAVE_ATTRIBUTE_TARGET_{ISA} -if (NPY_CPU_SUPPORTS_{ISA}) {{ - {fname}_functions[{idx}] = {type}_{fname}_{isa}; -}} -#endif -""".format(ISA=vt.upper(), isa=vt, fname=name, type=tname, idx=k)) for x in t.in_ + t.out: siglist.append('NPY_%s' % (english_upper(chartoname[x]),)) @@ -1018,14 +1040,19 @@ def make_ufuncs(funcdict): # string literal in C code. 
We split at endlines because textwrap.wrap # do not play well with \n docstring = '\\n\"\"'.join(docstring.split(r"\n")) - mlist.append(\ -r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d, - %d, %d, %s, "%s", - "%s", 0);""" % (name, name, name, - len(uf.type_descriptions), - uf.nin, uf.nout, - uf.identity, - name, docstring)) + fmt = textwrap.dedent("""\ + f = PyUFunc_FromFuncAndData( + {name}_functions, {name}_data, {name}_signatures, {nloops}, + {nin}, {nout}, {identity}, "{name}", + "{doc}", 0 + ); + if (f == NULL) {{ + return -1; + }}""") + mlist.append(fmt.format( + name=name, nloops=len(uf.type_descriptions), + nin=uf.nin, nout=uf.nout, identity=uf.identity, doc=docstring + )) if uf.typereso is not None: mlist.append( r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso) @@ -1040,23 +1067,25 @@ def make_code(funcdict, filename): code3 = make_ufuncs(funcdict) code2 = indent(code2, 4) code3 = indent(code3, 4) - code = r""" + code = textwrap.dedent(r""" -/** Warning this file is autogenerated!!! + /** Warning this file is autogenerated!!! 
- Please make changes to the code generator program (%s) -**/ + Please make changes to the code generator program (%s) + **/ -%s + %s -static void -InitOperators(PyObject *dictionary) { - PyObject *f; + static int + InitOperators(PyObject *dictionary) { + PyObject *f; -%s -%s -} -""" % (filename, code1, code2, code3) + %s + %s + + return 0; + } + """) % (filename, code1, code2, code3) return code diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py index d1406e3b2..a454d95b0 100644 --- a/numpy/core/code_generators/numpy_api.py +++ b/numpy/core/code_generators/numpy_api.py @@ -346,6 +346,9 @@ multiarray_funcs_api = { # End 1.10 API 'PyArray_MapIterArrayCopyIfOverlap': (301,), # End 1.13 API + 'PyArray_ResolveWritebackIfCopy': (302,), + 'PyArray_SetWritebackIfCopyBase': (303,), + # End 1.14 API } ufunc_types_api = { diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 6aae57234..75dee7084 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -43,6 +43,8 @@ add_newdoc('numpy.core.umath', 'absolute', """ Calculate the absolute value element-wise. + ``np.abs`` is a shorthand for this function. + Parameters ---------- x : array_like @@ -295,7 +297,7 @@ add_newdoc('numpy.core.umath', 'arcsinh', Returns ------- out : ndarray - Array of of the same shape as `x`. + Array of the same shape as `x`. Notes ----- @@ -573,7 +575,7 @@ add_newdoc('numpy.core.umath', 'bitwise_and', >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) array([ 2, 4, 16]) >>> np.bitwise_and([True, True], [False, True]) - array([False, True], dtype=bool) + array([False, True]) """) @@ -630,7 +632,7 @@ add_newdoc('numpy.core.umath', 'bitwise_or', ... 
np.array([4, 4, 4, 2147483647L], dtype=np.int32)) array([ 6, 5, 255, 2147483647]) >>> np.bitwise_or([True, True], [False, True]) - array([ True, True], dtype=bool) + array([ True, True]) """) @@ -680,7 +682,7 @@ add_newdoc('numpy.core.umath', 'bitwise_xor', >>> np.bitwise_xor([31,3], [5,6]) array([26, 5]) >>> np.bitwise_xor([True, True], [False, True]) - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1057,13 +1059,13 @@ add_newdoc('numpy.core.umath', 'equal', Examples -------- >>> np.equal([0, 1, 3], np.arange(3)) - array([ True, True, False], dtype=bool) + array([ True, True, False]) What is compared are values, not types. So an int (1) and an array of length one can evaluate as True: >>> np.equal(1, np.ones(1)) - array([ True], dtype=bool) + array([ True]) """) @@ -1389,14 +1391,14 @@ add_newdoc('numpy.core.umath', 'greater', Examples -------- >>> np.greater([4,2],[2,2]) - array([ True, False], dtype=bool) + array([ True, False]) If the inputs are ndarrays, then np.greater is equivalent to '>'. 
>>> a = np.array([4,2]) >>> b = np.array([2,2]) >>> a > b - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1424,7 +1426,7 @@ add_newdoc('numpy.core.umath', 'greater_equal', Examples -------- >>> np.greater_equal([4, 2, 1], [2, 2, 2]) - array([ True, True, False], dtype=bool) + array([ True, True, False]) """) @@ -1541,7 +1543,7 @@ add_newdoc('numpy.core.umath', 'invert', Booleans are accepted as well: >>> np.invert(array([True, False])) - array([False, True], dtype=bool) + array([False, True]) """) @@ -1599,7 +1601,7 @@ add_newdoc('numpy.core.umath', 'isfinite', >>> np.isfinite(np.NINF) False >>> np.isfinite([np.log(-1.),1.,np.log(0)]) - array([False, True, False], dtype=bool) + array([False, True, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) @@ -1661,7 +1663,7 @@ add_newdoc('numpy.core.umath', 'isinf', >>> np.isinf(np.NINF) True >>> np.isinf([np.inf, -np.inf, 1.0, np.nan]) - array([ True, True, False, False], dtype=bool) + array([ True, True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) @@ -1709,7 +1711,7 @@ add_newdoc('numpy.core.umath', 'isnan', >>> np.isnan(np.inf) False >>> np.isnan([np.log(-1.),1.,np.log(0)]) - array([ True, False, False], dtype=bool) + array([ True, False, False]) """) @@ -1745,7 +1747,7 @@ add_newdoc('numpy.core.umath', 'isnat', >>> np.isnat(np.datetime64("2016-01-01")) False >>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]")) - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1814,7 +1816,7 @@ add_newdoc('numpy.core.umath', 'less', Examples -------- >>> np.less([1, 2], [2, 2]) - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1842,7 +1844,7 @@ add_newdoc('numpy.core.umath', 'less_equal', Examples -------- >>> np.less_equal([4, 2, 1], [2, 2, 2]) - array([False, True, True], dtype=bool) + array([False, True, True]) """) @@ -2155,11 +2157,11 @@ add_newdoc('numpy.core.umath', 'logical_and', >>> 
np.logical_and(True, False) False >>> np.logical_and([True, False], [False, False]) - array([False, False], dtype=bool) + array([False, False]) >>> x = np.arange(5) >>> np.logical_and(x>1, x<4) - array([False, False, True, True, False], dtype=bool) + array([False, False, True, True, False]) """) @@ -2188,11 +2190,11 @@ add_newdoc('numpy.core.umath', 'logical_not', >>> np.logical_not(3) False >>> np.logical_not([True, False, 0, 1]) - array([False, True, True, False], dtype=bool) + array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_not(x<3) - array([False, False, False, True, True], dtype=bool) + array([False, False, False, True, True]) """) @@ -2223,11 +2225,11 @@ add_newdoc('numpy.core.umath', 'logical_or', >>> np.logical_or(True, False) True >>> np.logical_or([True, False], [False, False]) - array([ True, False], dtype=bool) + array([ True, False]) >>> x = np.arange(5) >>> np.logical_or(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) + array([ True, False, False, False, True]) """) @@ -2258,17 +2260,17 @@ add_newdoc('numpy.core.umath', 'logical_xor', >>> np.logical_xor(True, False) True >>> np.logical_xor([True, True, False, False], [True, False, True, False]) - array([False, True, True, False], dtype=bool) + array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_xor(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) + array([ True, False, False, False, True]) Simple example showing support of broadcasting >>> np.logical_xor(0, np.eye(2)) array([[ True, False], - [False, True]], dtype=bool) + [False, True]]) """) @@ -2647,10 +2649,10 @@ add_newdoc('numpy.core.umath', 'not_equal', Examples -------- >>> np.not_equal([1.,2.], [1., 3.]) - array([False, True], dtype=bool) + array([False, True]) >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) array([[False, True], - [False, True]], dtype=bool) + [False, True]]) """) @@ -3102,7 +3104,7 @@ add_newdoc('numpy.core.umath', 'signbit', >>> np.signbit(-1.2) 
True >>> np.signbit(np.array([1, -2.3, 2.1])) - array([False, True, False], dtype=bool) + array([False, True, False]) """) @@ -3166,7 +3168,7 @@ add_newdoc('numpy.core.umath', 'nextafter', >>> np.nextafter(1, 2) == eps + 1 True >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] - array([ True, True], dtype=bool) + array([ True, True]) """) @@ -3679,3 +3681,63 @@ add_newdoc('numpy.core.umath', 'ldexp', array([ 0., 1., 2., 3., 4., 5.]) """) + +add_newdoc('numpy.core.umath', 'gcd', + """ + Returns the greatest common divisor of |x1| and |x2| + + Parameters + ---------- + x1, x2 : array_like, int + Arrays of values + + Returns + ------- + y : ndarray or scalar + The greatest common divisor of the absolute value of the inputs + + See Also + -------- + lcm : The lowest common multiple + + Examples + -------- + >>> np.gcd(12, 20) + 4 + >>> np.gcd.reduce([15, 25, 35]) + 5 + >>> np.gcd(np.arange(6), 20) + array([20, 1, 2, 1, 4, 5]) + + """) + +add_newdoc('numpy.core.umath', 'lcm', + """ + Returns the lowest common multiple of |x1| and |x2| + + Parameters + ---------- + x1, x2 : array_like, int + Arrays of values + + Returns + ------- + y : ndarray or scalar + The lowest common multiple of the absolute value of the inputs + + See Also + -------- + gcd : The greatest common divisor + + Examples + -------- + >>> np.lcm(12, 20) + 60 + >>> np.lcm.reduce([3, 12, 20]) + 60 + >>> np.lcm.reduce([40, 12, 20]) + 120 + >>> np.lcm(np.arange(6), 20) + array([ 0, 20, 20, 60, 20, 20]) + + """) diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index e5f685369..6d0a0add5 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -575,9 +575,9 @@ def endswith(a, suffix, start=0, end=None): array(['foo', 'bar'], dtype='|S3') >>> np.char.endswith(s, 'ar') - array([False, True], dtype=bool) + array([False, True]) >>> np.char.endswith(s, 'a', start=1, end=2) - array([False, True], dtype=bool) + array([False, True]) """ return _vec_string( diff --git 
a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index 1ea3e598c..8cd6eae12 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -4,6 +4,7 @@ Implementation of optimized einsum. """ from __future__ import division, absolute_import, print_function +from numpy.compat import basestring from numpy.core.multiarray import c_einsum from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot @@ -399,7 +400,7 @@ def _parse_einsum_input(operands): if len(operands) == 0: raise ValueError("No input operands") - if isinstance(operands[0], str): + if isinstance(operands[0], basestring): subscripts = operands[0].replace(" ", "") operands = [asanyarray(v) for v in operands[1:]] @@ -595,7 +596,7 @@ def einsum_path(*operands, **kwargs): -------- We can begin with a chain dot example. In this case, it is optimal to - contract the ``b`` and ``c`` tensors first as reprsented by the first + contract the ``b`` and ``c`` tensors first as represented by the first element of the path ``(1, 2)``. The resulting tensor is added to the end of the contraction and the remaining contraction ``(0, 1)`` is then completed. 
@@ -665,7 +666,7 @@ def einsum_path(*operands, **kwargs): memory_limit = None # No optimization or a named path algorithm - if (path_type is False) or isinstance(path_type, str): + if (path_type is False) or isinstance(path_type, basestring): pass # Given an explicit path @@ -673,7 +674,7 @@ def einsum_path(*operands, **kwargs): pass # Path tuple with memory limit - elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and isinstance(path_type[1], (int, float))): memory_limit = int(path_type[1]) path_type = path_type[0] @@ -700,14 +701,18 @@ def einsum_path(*operands, **kwargs): sh = operands[tnum].shape if len(sh) != len(term): raise ValueError("Einstein sum subscript %s does not contain the " - "correct number of indices for operand %d.", - input_subscripts[tnum], tnum) + "correct number of indices for operand %d." + % (input_subscripts[tnum], tnum)) for cnum, char in enumerate(term): dim = sh[cnum] if char in dimension_dict.keys(): - if dimension_dict[char] != dim: - raise ValueError("Size of label '%s' for operand %d does " - "not match previous terms.", char, tnum) + # For broadcasting cases we always want the largest dim size + if dimension_dict[char] == 1: + dimension_dict[char] = dim + elif dim not in (1, dimension_dict[char]): + raise ValueError("Size of label '%s' for operand %d (%d) " + "does not match previous terms (%d)." 
+ % (char, tnum, dimension_dict[char], dim)) else: dimension_dict[char] = dim @@ -723,7 +728,7 @@ def einsum_path(*operands, **kwargs): memory_arg = memory_limit # Compute naive cost - # This isnt quite right, need to look into exactly how einsum does this + # This isn't quite right, need to look into exactly how einsum does this naive_cost = _compute_size_by_dict(indices, dimension_dict) indices_in_input = input_subscripts.replace(',', '') mult = max(len(input_list) - 1, 1) @@ -1056,8 +1061,8 @@ def einsum(*operands, **kwargs): """ - # Grab non-einsum kwargs - optimize_arg = kwargs.pop('optimize', False) + # Grab non-einsum kwargs; never optimize 2-argument case. + optimize_arg = kwargs.pop('optimize', len(operands) > 3) # If no optimization, run pure einsum if optimize_arg is False: @@ -1099,13 +1104,22 @@ def einsum(*operands, **kwargs): if specified_out and ((num + 1) == len(contraction_list)): handle_out = True - # Call tensordot + # Handle broadcasting vs BLAS cases if blas: - # Checks have already been handled input_str, results_index = einsum_str.split('->') input_left, input_right = input_str.split(',') - + if 1 in tmp_operands[0] or 1 in tmp_operands[1]: + left_dims = {dim: size for dim, size in + zip(input_left, tmp_operands[0].shape)} + right_dims = {dim: size for dim, size in + zip(input_right, tmp_operands[1].shape)} + # If dims do not match we are broadcasting, BLAS off + if any(left_dims[ind] != right_dims[ind] for ind in idx_rm): + blas = False + + # Call tensordot if still possible + if blas: tensor_result = input_left + input_right for s in idx_rm: tensor_result = tensor_result.replace(s, "") diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index ebeea6319..43584349f 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -66,15 +66,28 @@ def take(a, indices, axis=None, out=None, mode='raise'): """ Take elements from an array along an axis. 
- This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. + + Explained without fancy indexing, this is equivalent to the following use + of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of + indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + Nj = indices.shape + for ii in ndindex(Ni): + for jj in ndindex(Nj): + for kk in ndindex(Nk): + out[ii + jj + kk] = a[ii + (indices[jj],) + kk] Parameters ---------- - a : array_like + a : array_like (Ni..., M, Nk...) The source array. - indices : array_like + indices : array_like (Nj...) The indices of the values to extract. .. versionadded:: 1.8.0 @@ -83,7 +96,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): axis : int, optional The axis over which to select values. By default, the flattened input array is used. - out : ndarray, optional + out : ndarray, optional (Ni..., Nj..., Nk...) If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional @@ -99,7 +112,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Returns ------- - subarray : ndarray + out : ndarray (Ni..., Nj..., Nk...) The returned array has the same type as `a`. 
See Also @@ -107,6 +120,23 @@ compress : Take elements using a boolean mask ndarray.take : equivalent method + Notes + ----- + + By eliminating the inner loop in the description above, and using `s_` to + build simple slice objects, `take` can be expressed in terms of applying + fancy indexing to each 1-d slice:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] + + For this reason, it is equivalent to (but faster than) the following use + of `apply_along_axis`:: + + out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) + Examples -------- >>> a = [4, 3, 5, 7, 6, 8] @@ -171,11 +201,11 @@ def reshape(a, newshape, order='C'): Notes ----- It is not always possible to change the shape of an array without - copying the data. If you want an error to be raise if the data is copied, + copying the data. If you want an error to be raised when the data is copied, you should assign the new shape to the shape attribute of the array:: >>> a = np.zeros((10, 2)) - # A transpose make the array non-contiguous + # A transpose makes the array non-contiguous >>> b = a.T # Taking a view makes it possible to modify the shape without modifying # the initial object. @@ -566,7 +596,7 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): Element index to partition by. The k-th value of the element will be in its final sorted position and all smaller elements will be moved before it and all equal or greater elements behind - it. The order all elements in the partitions is undefined. If + it. The order of all elements in the partitions is undefined. If provided with a sequence of k-th it will partition all elements indexed by k-th of them into their sorted position at once.
axis : int or None, optional @@ -1046,6 +1076,15 @@ def searchsorted(a, v, side='left', sorter=None): corresponding elements in `v` were inserted before the indices, the order of `a` would be preserved. + Assuming that `a` is sorted: + + ====== ============================ + `side` returned index `i` satisfies + ====== ============================ + left ``a[i-1] < v <= a[i]`` + right ``a[i-1] <= v < a[i]`` + ====== ============================ + Parameters ---------- a : 1-D array_like @@ -1081,6 +1120,10 @@ def searchsorted(a, v, side='left', sorter=None): As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing `nan` values. The enhanced sort order is documented in `sort`. + This function is a faster version of the builtin python `bisect.bisect_left` + (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, + which is also vectorized in the `v` argument. + Examples -------- >>> np.searchsorted([1,2,3,4,5], 3) @@ -1567,7 +1610,7 @@ def nonzero(a): >>> a > 3 array([[False, False, False], [ True, True, True], - [ True, True, True]], dtype=bool) + [ True, True, True]]) >>> np.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) @@ -1782,7 +1825,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `sum` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns @@ -1936,7 +1979,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `any` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. 
Returns @@ -1962,7 +2005,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): True >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False], dtype=bool) + array([ True, False]) >>> np.any([-1, 0, 5]) True @@ -1973,7 +2016,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): >>> o=np.array([False]) >>> z=np.any([-1, 4, 5], out=o) >>> z, o - (array([ True], dtype=bool), array([ True], dtype=bool)) + (array([ True]), array([ True])) >>> # Check now that z is a reference to o >>> z is o True @@ -2021,7 +2064,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `all` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns @@ -2047,7 +2090,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): False >>> np.all([[True,False],[True,True]], axis=0) - array([ True, False], dtype=bool) + array([ True, False]) >>> np.all([-1, 4, 5]) True @@ -2058,7 +2101,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): >>> o=np.array([False]) >>> z=np.all([-1, 4, 5], out=o) >>> id(z), id(o), z # doctest: +SKIP - (28293632, 28293632, array([ True], dtype=bool)) + (28293632, 28293632, array([ True])) """ arr = asanyarray(a) @@ -2148,7 +2191,7 @@ def cumproduct(a, axis=None, dtype=None, out=None): return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) -def ptp(a, axis=None, out=None): +def ptp(a, axis=None, out=None, keepdims=np._NoValue): """ Range of values (maximum - minimum) along an axis. @@ -2158,14 +2201,31 @@ def ptp(a, axis=None, out=None): ---------- a : array_like Input values. - axis : int, optional + axis : None or int or tuple of ints, optional Axis along which to find the peaks. By default, flatten the - array. + array. 
`axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.15.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. out : array_like Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type of the output values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + Returns ------- ptp : ndarray @@ -2186,7 +2246,17 @@ def ptp(a, axis=None, out=None): array([1, 1]) """ - return _wrapfunc(a, 'ptp', axis=axis, out=out) + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: + try: + ptp = a.ptp + except AttributeError: + pass + else: + return ptp(axis=axis, out=out, **kwargs) + return _methods._ptp(a, axis=axis, out=out, **kwargs) def amax(a, axis=None, out=None, keepdims=np._NoValue): @@ -2218,7 +2288,7 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `amax` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. 
Returns @@ -2319,7 +2389,7 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `amin` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns @@ -2461,7 +2531,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `prod` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns @@ -2860,7 +2930,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `mean` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns @@ -2967,7 +3037,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `std` method of sub-classes of `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns @@ -3086,7 +3156,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): If the default value is passed, then `keepdims` will not be passed through to the `var` method of sub-classes of `ndarray`, however any non-default value will be. 
If the - sub-classes `sum` method does not implement `keepdims` any + sub-class' method does not implement `keepdims` any exceptions will be raised. Returns diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h index f26d64efb..4e63868f3 100644 --- a/numpy/core/include/numpy/ndarrayobject.h +++ b/numpy/core/include/numpy/ndarrayobject.h @@ -171,15 +171,16 @@ extern "C" CONFUSE_EMACS (l)*PyArray_STRIDES(obj)[3])) static NPY_INLINE void -PyArray_XDECREF_ERR(PyArrayObject *arr) +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) { if (arr != NULL) { - if (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY) { + if ((PyArray_FLAGS(arr) & NPY_ARRAY_WRITEBACKIFCOPY) || + (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY)) { PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr); PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); } - Py_DECREF(arr); } } @@ -238,6 +239,19 @@ PyArray_XDECREF_ERR(PyArrayObject *arr) #define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) #define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) +static NPY_INLINE void +PyArray_XDECREF_ERR(PyArrayObject *arr) +{ + /* 2017-Nov-10 1.14 */ + DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " + "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); + PyArray_DiscardWritebackIfCopy(arr); + Py_XDECREF(arr); +} +#endif + #ifdef __cplusplus } diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 8c5d855df..cf73cecea 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -235,29 +235,34 @@ typedef enum { * TIMEZONE: 5 * NULL TERMINATOR: 1 */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 
+ 3*6 + 6 + 1) +/* The FR in the unit names stands for frequency */ typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ } NPY_DATETIMEUNIT; /* * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. + * is technically one more than the actual number of units. */ #define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) #define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC @@ -677,7 +682,7 @@ typedef struct tagPyArrayObject_fields { /* * This object is decref'd upon * deletion of array. Except in the - * case of UPDATEIFCOPY which has + * case of WRITEBACKIFCOPY which has * special handling. 
* * For views it points to the original @@ -688,9 +693,9 @@ typedef struct tagPyArrayObject_fields { * points to an object that should be * decref'd on deletion * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy */ PyObject *base; /* Pointer to type structure */ @@ -865,12 +870,13 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); /* * If this flag is set, then base contains a pointer to an array of * the same size that should be updated with the current contents of - * this array when this array is deallocated + * this array when PyArray_ResolveWritebackIfCopy is called. * * This flag may be requested in constructor functions. * This flag may be tested for in PyArray_FLAGS(arr). */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 /* * NOTE: there are also internal flags defined in multiarray/arrayobject.h, @@ -895,10 +901,14 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); #define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) #define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) #define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) #define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) #define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) #define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ NPY_ARRAY_F_CONTIGUOUS | \ @@ -1044,7 +1054,7 @@ typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, #define NPY_ITER_CONTIG 0x00200000 /* The operand may be copied to satisfy requirements */ #define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ 
+/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ #define NPY_ITER_UPDATEIFCOPY 0x00800000 /* Allocate the operand if it is NULL */ #define NPY_ITER_ALLOCATE 0x01000000 diff --git a/numpy/core/include/numpy/noprefix.h b/numpy/core/include/numpy/noprefix.h index 45130d16e..041f30192 100644 --- a/numpy/core/include/numpy/noprefix.h +++ b/numpy/core/include/numpy/noprefix.h @@ -166,6 +166,7 @@ #define NOTSWAPPED NPY_NOTSWAPPED #define WRITEABLE NPY_WRITEABLE #define UPDATEIFCOPY NPY_UPDATEIFCOPY +#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY #define ARR_HAS_DESCR NPY_ARR_HAS_DESCR #define BEHAVED NPY_BEHAVED #define BEHAVED_NS NPY_BEHAVED_NS diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index c0aa1eb2e..56fbd99af 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -94,6 +94,8 @@ static NPY_INLINE int PyInt_Check(PyObject *op) { #define PyUString_InternFromString PyUnicode_InternFromString #define PyUString_Format PyUnicode_Format +#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) + #else #define PyBytes_Type PyString_Type @@ -123,6 +125,8 @@ static NPY_INLINE int PyInt_Check(PyObject *op) { #define PyUString_InternFromString PyString_InternFromString #define PyUString_Format PyString_Format +#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) + #endif /* NPY_PY3K */ diff --git a/numpy/core/include/numpy/utils.h b/numpy/core/include/numpy/utils.h index cc968a354..32218b8c7 100644 --- a/numpy/core/include/numpy/utils.h +++ b/numpy/core/include/numpy/utils.h @@ -6,6 +6,8 @@ #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) # elif defined(__ICC) #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + # elif defined(__clang__) + #define __COMP_NPY_UNUSED __attribute__ ((unused)) #else #define __COMP_NPY_UNUSED #endif diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index bf3f43444..5c8951474 100644 
--- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -42,7 +42,13 @@ else: import cPickle as pickle import __builtin__ as builtins -loads = pickle.loads + +def loads(*args, **kwargs): + # NumPy 1.15.0, 2017-12-10 + warnings.warn( + "np.core.numeric.loads is deprecated, use pickle.loads instead", + DeprecationWarning, stacklevel=2) + return pickle.loads(*args, **kwargs) __all__ = [ @@ -666,6 +672,7 @@ def require(a, dtype=None, requirements=None): OWNDATA : False WRITEABLE : True ALIGNED : True + WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) @@ -675,6 +682,7 @@ def require(a, dtype=None, requirements=None): OWNDATA : True WRITEABLE : True ALIGNED : True + WRITEBACKIFCOPY : False UPDATEIFCOPY : False """ @@ -821,12 +829,12 @@ def flatnonzero(a): """ Return indices that are non-zero in the flattened version of a. - This is equivalent to a.ravel().nonzero()[0]. + This is equivalent to np.nonzero(np.ravel(a))[0]. Parameters ---------- - a : ndarray - Input array. + a : array_like + Input data. 
Returns ------- @@ -854,7 +862,7 @@ def flatnonzero(a): array([-2, -1, 1, 2]) """ - return a.ravel().nonzero()[0] + return np.nonzero(np.ravel(a))[0] _mode_from_name_dict = {'v': 0, @@ -1191,7 +1199,7 @@ def tensordot(a, b, axes=2): [ True, True], [ True, True], [ True, True], - [ True, True]], dtype=bool) + [ True, True]]) An extended example taking advantage of the overloading of + and \\*: @@ -1899,7 +1907,7 @@ def fromfunction(function, shape, **kwargs): >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) array([[ True, False, False], [False, True, False], - [False, False, True]], dtype=bool) + [False, False, True]]) >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) array([[0, 1, 2], @@ -2132,6 +2140,10 @@ def load(file): load, save """ + # NumPy 1.15.0, 2017-12-10 + warnings.warn( + "np.core.numeric.load is deprecated, use pickle.load instead", + DeprecationWarning, stacklevel=2) if isinstance(file, type("")): file = open(file, "rb") return pickle.load(file) @@ -2264,6 +2276,9 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): relative difference (`rtol` * abs(`b`)) and the absolute difference `atol` are added together to compare against the absolute difference between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + that are much smaller than one (see Notes). Parameters ---------- @@ -2297,9 +2312,15 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - The above equation is not symmetric in `a` and `b`, so that - `isclose(a, b)` might be different from `isclose(b, a)` in - some rare cases. + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, + the default value of atol is not zero, and is used to determine what + small values should be considered close to zero. 
The default value is + appropriate for expected values of order unity: if the expected values + are significantly smaller than one, it can result in false positives. + `atol` should be carefully selected for the use case at hand. A zero value + for `atol` will result in `False` if either `a` or `b` is zero. Examples -------- @@ -2313,6 +2334,14 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): array([True, False]) >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) array([True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False]) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True]) """ def within_tol(x, y, atol, rtol): with errstate(invalid='ignore'): diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index b61f5e7bc..aa91ecb44 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -238,8 +238,8 @@ def bitname(obj): else: newname = name info = typeinfo[english_upper(newname)] - assert(info[-1] == obj) # sanity check - bits = info[2] + assert(info.type == obj) # sanity check + bits = info.bits except KeyError: # bit-width name base, bits = _evalname(name) @@ -284,51 +284,53 @@ def bitname(obj): def _add_types(): - for a in typeinfo.keys(): - name = english_lower(a) - if isinstance(typeinfo[a], tuple): - typeobj = typeinfo[a][-1] - + for type_name, info in typeinfo.items(): + name = english_lower(type_name) + if not isinstance(info, type): # define C-name and insert typenum and typechar references also - allTypes[name] = typeobj - sctypeDict[name] = typeobj - sctypeDict[typeinfo[a][0]] = typeobj - sctypeDict[typeinfo[a][1]] = typeobj + allTypes[name] = info.type + sctypeDict[name] = info.type + sctypeDict[info.char] = info.type + sctypeDict[info.num] = 
info.type else: # generic class - allTypes[name] = typeinfo[a] + allTypes[name] = info _add_types() def _add_aliases(): - for a in typeinfo.keys(): - name = english_lower(a) - if not isinstance(typeinfo[a], tuple): + for type_name, info in typeinfo.items(): + if isinstance(info, type): continue - typeobj = typeinfo[a][-1] + name = english_lower(type_name) + # insert bit-width version for this class (if relevant) - base, bit, char = bitname(typeobj) + base, bit, char = bitname(info.type) if base[-3:] == 'int' or char[0] in 'ui': continue if base != '': myname = "%s%d" % (base, bit) - if ((name != 'longdouble' and name != 'clongdouble') or - myname not in allTypes.keys()): - allTypes[myname] = typeobj - sctypeDict[myname] = typeobj + if (name not in ('longdouble', 'clongdouble') or + myname not in allTypes): + base_capitalize = english_capitalize(base) if base == 'complex': - na_name = '%s%d' % (english_capitalize(base), bit//2) + na_name = '%s%d' % (base_capitalize, bit//2) elif base == 'bool': - na_name = english_capitalize(base) - sctypeDict[na_name] = typeobj + na_name = base_capitalize else: - na_name = "%s%d" % (english_capitalize(base), bit) - sctypeDict[na_name] = typeobj - sctypeNA[na_name] = typeobj - sctypeDict[na_name] = typeobj - sctypeNA[typeobj] = na_name - sctypeNA[typeinfo[a][0]] = na_name + na_name = "%s%d" % (base_capitalize, bit) + + allTypes[myname] = info.type + + # add mapping for both the bit name and the numarray name + sctypeDict[myname] = info.type + sctypeDict[na_name] = info.type + + # add forward, reverse, and string mapping to numarray + sctypeNA[na_name] = info.type + sctypeNA[info.type] = na_name + sctypeNA[info.char] = na_name if char != '': - sctypeDict[char] = typeobj + sctypeDict[char] = info.type sctypeNA[char] = na_name _add_aliases() @@ -339,34 +341,22 @@ _add_aliases() def _add_integer_aliases(): _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE'] for ctype in _ctypes: - val = typeinfo[ctype] - bits = val[2] - charname = 
'i%d' % (bits//8,) - ucharname = 'u%d' % (bits//8,) - intname = 'int%d' % bits - UIntname = 'UInt%d' % bits - Intname = 'Int%d' % bits - uval = typeinfo['U'+ctype] - typeobj = val[-1] - utypeobj = uval[-1] - if intname not in allTypes.keys(): - uintname = 'uint%d' % bits - allTypes[intname] = typeobj - allTypes[uintname] = utypeobj - sctypeDict[intname] = typeobj - sctypeDict[uintname] = utypeobj - sctypeDict[Intname] = typeobj - sctypeDict[UIntname] = utypeobj - sctypeDict[charname] = typeobj - sctypeDict[ucharname] = utypeobj - sctypeNA[Intname] = typeobj - sctypeNA[UIntname] = utypeobj - sctypeNA[charname] = typeobj - sctypeNA[ucharname] = utypeobj - sctypeNA[typeobj] = Intname - sctypeNA[utypeobj] = UIntname - sctypeNA[val[0]] = Intname - sctypeNA[uval[0]] = UIntname + i_info = typeinfo[ctype] + u_info = typeinfo['U'+ctype] + bits = i_info.bits # same for both + + for info, charname, intname, Intname in [ + (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits), + (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]: + if intname not in allTypes.keys(): + allTypes[intname] = info.type + sctypeDict[intname] = info.type + sctypeDict[Intname] = info.type + sctypeDict[charname] = info.type + sctypeNA[Intname] = info.type + sctypeNA[charname] = info.type + sctypeNA[info.type] = Intname + sctypeNA[info.char] = Intname _add_integer_aliases() # We use these later @@ -427,11 +417,10 @@ _set_up_aliases() # Now, construct dictionary to lookup character codes from types _sctype2char_dict = {} def _construct_char_code_lookup(): - for name in typeinfo.keys(): - tup = typeinfo[name] - if isinstance(tup, tuple): - if tup[0] not in ['p', 'P']: - _sctype2char_dict[tup[-1]] = tup[0] + for name, info in typeinfo.items(): + if not isinstance(info, type): + if info.char not in ['p', 'P']: + _sctype2char_dict[info.type] = info.char _construct_char_code_lookup() @@ -776,15 +765,15 @@ _alignment = _typedict() _maxvals = _typedict() _minvals = _typedict() def 
_construct_lookups(): - for name, val in typeinfo.items(): - if not isinstance(val, tuple): + for name, info in typeinfo.items(): + if isinstance(info, type): continue - obj = val[-1] - nbytes[obj] = val[2] // 8 - _alignment[obj] = val[3] - if (len(val) > 5): - _maxvals[obj] = val[4] - _minvals[obj] = val[5] + obj = info.type + nbytes[obj] = info.bits // 8 + _alignment[obj] = info.alignment + if len(info) > 5: + _maxvals[obj] = info.max + _minvals[obj] = info.min else: _maxvals[obj] = None _minvals[obj] = None diff --git a/numpy/core/records.py b/numpy/core/records.py index b6ff8bf65..612d39322 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -38,10 +38,12 @@ from __future__ import division, absolute_import, print_function import sys import os +import warnings from . import numeric as sb from . import numerictypes as nt from numpy.compat import isfileobj, bytes, long +from .arrayprint import get_printoptions # All of the functions allow formats to be a dtype __all__ = ['record', 'recarray', 'format_parser'] @@ -222,10 +224,14 @@ class record(nt.void): __module__ = 'numpy' def __repr__(self): - return self.__str__() + if get_printoptions()['legacy'] == '1.13': + return self.__str__() + return super(record, self).__repr__() def __str__(self): - return str(self.item()) + if get_printoptions()['legacy'] == '1.13': + return str(self.item()) + return super(record, self).__str__() def __getattribute__(self, attr): if attr in ['setfield', 'getfield', 'dtype']: @@ -525,22 +531,25 @@ class recarray(ndarray): if repr_dtype.type is record: repr_dtype = sb.dtype((nt.void, repr_dtype)) prefix = "rec.array(" - fmt = 'rec.array(%s, %sdtype=%s)' + fmt = 'rec.array(%s,%sdtype=%s)' else: # otherwise represent it using np.array plus a view # This should only happen if the user is playing # strange games with dtypes. prefix = "array(" - fmt = 'array(%s, %sdtype=%s).view(numpy.recarray)' + fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)' # get data/shape string. 
logic taken from numeric.array_repr if self.size > 0 or self.shape == (0,): - lst = sb.array2string(self, separator=', ', prefix=prefix) + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') else: # show zero-length shape unless it is (0,) lst = "[], shape=%s" % (repr(self.shape),) lf = '\n'+' '*len(prefix) + if get_printoptions()['legacy'] == '1.13': + lf = ' ' + lf # trailing space return fmt % (lst, lf, repr_dtype) def field(self, attr, val=None): @@ -669,7 +678,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, try: retval = sb.array(recList, dtype=descr) - except TypeError: # list of lists instead of list of tuples + except (TypeError, ValueError): if (shape is None or shape == 0): shape = len(recList) if isinstance(shape, (int, long)): @@ -679,6 +688,12 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, _array = recarray(shape, descr) for k in range(_array.size): _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a list " + "of lists instead. In the future that will raise an error", + FutureWarning, stacklevel=2) return _array else: if shape is not None and retval.shape != shape: diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 22eb63f45..11b1acb07 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -6,6 +6,7 @@ import pickle import copy import sysconfig import warnings +import platform from os.path import join from numpy.distutils import log from distutils.dep_util import newer @@ -29,7 +30,7 @@ NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CH # XXX: ugly, we use a class to avoid calling twice some expensive functions in # config.h/numpyconfig.h. 
I don't see a better way because distutils force # config.h generation inside an Extension class, and as such sharing -# configuration informations between extensions is not easy. +# configuration information between extensions is not easy. # Using a pickled-based memoize does not work because config_cmd is an instance # method, which cPickle does not like. # @@ -556,7 +557,7 @@ def configuration(parent_package='',top_path=None): if NPY_RELAXED_STRIDES_DEBUG: moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) - # Check wether we can use inttypes (C99) formats + # Check whether we can use inttypes (C99) formats if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']): moredefs.append(('NPY_USE_C99_FORMATS', 1)) @@ -685,13 +686,16 @@ def configuration(parent_package='',top_path=None): join('src', 'npymath', 'npy_math_complex.c.src'), join('src', 'npymath', 'halffloat.c') ] + + # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. + is_msvc = platform.system() == 'Windows' config.add_installed_library('npymath', sources=npymath_sources + [get_mathlib_info], install_dir='lib', build_info={ - 'include_dirs' : [], - 'extra_compiler_args' : (['/GL-'] if sys.platform == 'win32' else []), - }) # empty list required for creating npy_math_internal.h + 'include_dirs' : [], # empty list required for creating npy_math_internal.h + 'extra_compiler_args' : (['/GL-'] if is_msvc else []), + }) config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", subst_dict) config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", @@ -746,6 +750,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'sequence.h'), join('src', 'multiarray', 'shape.h'), join('src', 'multiarray', 'strfuncs.h'), + join('src', 'multiarray', 'typeinfo.h'), join('src', 'multiarray', 'ucsnarrow.h'), join('src', 'multiarray', 'usertypes.h'), join('src', 'multiarray', 'vdot.h'), @@ -823,6 +828,7 @@ def configuration(parent_package='',top_path=None): join('src', 
'multiarray', 'scalartypes.c.src'), join('src', 'multiarray', 'strfuncs.c'), join('src', 'multiarray', 'temp_elide.c'), + join('src', 'multiarray', 'typeinfo.c'), join('src', 'multiarray', 'usertypes.c'), join('src', 'multiarray', 'ucsnarrow.c'), join('src', 'multiarray', 'vdot.c'), diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 094cd1841..1fe953910 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -39,8 +39,8 @@ C_ABI_VERSION = 0x01000009 # 0x0000000a - 1.11.x # 0x0000000a - 1.12.x # 0x0000000b - 1.13.x -# 0x0000000b - 1.14.x -C_API_VERSION = 0x0000000b +# 0x0000000c - 1.14.x +C_API_VERSION = 0x0000000c class MismatchCAPIWarning(Warning): pass @@ -166,7 +166,7 @@ OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', # variable attributes tested via "int %s a" % attribute OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] -# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h +# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h OPTIONAL_STDFUNCS_MAYBE = [ "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign", "ftello", "fseeko" @@ -216,6 +216,21 @@ def check_long_double_representation(cmd): except (AttributeError, ValueError): pass + # Disable multi-file interprocedural optimization in the Intel compiler on Linux + # which generates intermediary object files and prevents checking the + # float representation. 
+ elif (sys.platform != "win32" + and cmd.compiler.compiler_type.startswith('intel') + and '-ipo' in cmd.compiler.cc_exe): + newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') + cmd.compiler.set_executables( + compiler=newcompiler, + compiler_so=newcompiler, + compiler_cxx=newcompiler, + linker_exe=newcompiler, + linker_so=newcompiler + ' -shared' + ) + # We need to use _compile because we need the object filename src, obj = cmd._compile(body, None, None, 'c') try: diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 026ad603a..319c25088 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -183,23 +183,25 @@ def vstack(tup): """ Stack arrays in sequence vertically (row wise). - Take a sequence of arrays and stack them vertically to make a single - array. Rebuild arrays divided by `vsplit`. + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. - This function continues to be supported for backward compatibility, but - you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack`` - function was added in NumPy 1.10. + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays - Tuple containing arrays to be stacked. The arrays must have the same - shape along all but the first axis. + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. Returns ------- stacked : ndarray - The array formed by stacking the given arrays. + The array formed by stacking the given arrays, will be at least 2-D. 
See Also -------- @@ -210,11 +212,6 @@ def vstack(tup): vsplit : Split array into a list of multiple sub-arrays vertically. block : Assemble arrays from blocks. - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that - are at least 2-dimensional. - Examples -------- >>> a = np.array([1, 2, 3]) @@ -240,17 +237,20 @@ def hstack(tup): """ Stack arrays in sequence horizontally (column wise). - Take a sequence of arrays and stack them horizontally to make - a single array. Rebuild arrays divided by `hsplit`. + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. - This function continues to be supported for backward compatibility, but - you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack`` - function was added in NumPy 1.10. + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays - All arrays must have the same shape along all but the second axis. + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. Returns ------- @@ -266,11 +266,6 @@ def hstack(tup): hsplit : Split array along second axis. block : Assemble arrays from blocks. - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=1)`` if `tup` contains arrays that - are at least 2-dimensional. 
- Examples -------- >>> a = np.array((1,2,3)) @@ -365,78 +360,99 @@ def stack(arrays, axis=0, out=None): return _nx.concatenate(expanded_arrays, axis=axis, out=out) -class _Recurser(object): +def _block_check_depths_match(arrays, parent_index=[]): + """ + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. + + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + The parameter `parent_index` is the full index of `arrays` within the + nested lists passed to _block_check_depths_match at the top of the + recursion. + The return value is a pair. The first item returned is the full index + of an element (specifically the first element) from the bottom of the + nesting in `arrays`. An empty list at the bottom of the nesting is + represented by a `None` index. + The second item is the maximum of the ndims of the arrays nested in + `arrays`. + """ + def format_index(index): + idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + return 'arrays' + idx_str + if type(arrays) is tuple: + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + '{} is a tuple. 
' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.'.format( + format_index(parent_index) + ) + ) + elif type(arrays) is list and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim = next(idxs_ndims) + for index, ndim in idxs_ndims: + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at depth " + "{}, but there is an element at depth {} ({})".format( + len(first_index), + len(index), + format_index(index) + ) + ) + return first_index, max_arr_ndim + elif type(arrays) is list and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + return parent_index, _nx.ndim(arrays) + + +def _block(arrays, max_depth, result_ndim): """ - Utility class for recursing over nested iterables + Internal implementation of block. `arrays` is the argument passed to + block. `max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). """ - def __init__(self, recurse_if): - self.recurse_if = recurse_if - - def map_reduce(self, x, f_map=lambda x, **kwargs: x, - f_reduce=lambda x, **kwargs: x, - f_kwargs=lambda **kwargs: kwargs, - **kwargs): - """ - Iterate over the nested list, applying: - * ``f_map`` (T -> U) to items - * ``f_reduce`` (Iterable[U] -> U) to mapped items - - For instance, ``map_reduce([[1, 2], 3, 4])`` is:: - - f_reduce([ - f_reduce([ - f_map(1), - f_map(2) - ]), - f_map(3), - f_map(4) - ]]) - - - State can be passed down through the calls with `f_kwargs`, - to iterables of mapped items. 
When kwargs are passed, as in - ``map_reduce([[1, 2], 3, 4], **kw)``, this becomes:: - - kw1 = f_kwargs(**kw) - kw2 = f_kwargs(**kw1) - f_reduce([ - f_reduce([ - f_map(1), **kw2) - f_map(2, **kw2) - ], **kw1), - f_map(3, **kw1), - f_map(4, **kw1) - ]], **kw) - """ - def f(x, **kwargs): - if not self.recurse_if(x): - return f_map(x, **kwargs) - else: - next_kwargs = f_kwargs(**kwargs) - return f_reduce(( - f(xi, **next_kwargs) - for xi in x - ), **kwargs) - return f(x, **kwargs) - - def walk(self, x, index=()): - """ - Iterate over x, yielding (index, value, entering), where - - * ``index``: a tuple of indices up to this point - * ``value``: equal to ``x[index[0]][...][index[-1]]``. On the first iteration, is - ``x`` itself - * ``entering``: bool. The result of ``recurse_if(value)`` - """ - do_recurse = self.recurse_if(x) - yield index, x, do_recurse - - if not do_recurse: - return - for i, xi in enumerate(x): - # yield from ... - for v in self.walk(xi, index + (i,)): - yield v + def atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=False, subok=True) + + def block_recursion(arrays, depth=0): + if depth < max_depth: + if len(arrays) == 0: + raise ValueError('Lists cannot be empty') + arrs = [block_recursion(arr, depth+1) for arr in arrays] + return _nx.concatenate(arrs, axis=-(max_depth-depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return atleast_nd(arrays, result_ndim) + + try: + return block_recursion(arrays) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). 
To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + block_recursion = None def block(arrays): @@ -587,81 +603,6 @@ def block(arrays): """ - def atleast_nd(x, ndim): - x = asanyarray(x) - diff = max(ndim - x.ndim, 0) - return x[(None,)*diff + (Ellipsis,)] - - def format_index(index): - return 'arrays' + ''.join('[{}]'.format(i) for i in index) - - rec = _Recurser(recurse_if=lambda x: type(x) is list) - - # ensure that the lists are all matched in depth - list_ndim = None - any_empty = False - for index, value, entering in rec.walk(arrays): - if type(value) is tuple: - # not strictly necessary, but saves us from: - # - more than one way to do things - no point treating tuples like - # lists - # - horribly confusing behaviour that results when tuples are - # treated like ndarray - raise TypeError( - '{} is a tuple. ' - 'Only lists can be used to arrange blocks, and np.block does ' - 'not allow implicit conversion from tuple to ndarray.'.format( - format_index(index) - ) - ) - if not entering: - curr_depth = len(index) - elif len(value) == 0: - curr_depth = len(index) + 1 - any_empty = True - else: - continue - - if list_ndim is not None and list_ndim != curr_depth: - raise ValueError( - "List depths are mismatched. 
First element was at depth {}, " - "but there is an element at depth {} ({})".format( - list_ndim, - curr_depth, - format_index(index) - ) - ) - list_ndim = curr_depth - - # do this here so we catch depth mismatches first - if any_empty: - raise ValueError('Lists cannot be empty') - - # convert all the arrays to ndarrays - arrays = rec.map_reduce(arrays, - f_map=asanyarray, - f_reduce=list - ) - - # determine the maximum dimension of the elements - elem_ndim = rec.map_reduce(arrays, - f_map=lambda xi: xi.ndim, - f_reduce=max - ) - ndim = max(list_ndim, elem_ndim) - - # first axis to concatenate along - first_axis = ndim - list_ndim - - # Make all the elements the same dimension - arrays = rec.map_reduce(arrays, - f_map=lambda xi: atleast_nd(xi, ndim), - f_reduce=list - ) - - # concatenate innermost lists on the right, outermost on the left - return rec.map_reduce(arrays, - f_reduce=lambda xs, axis: _nx.concatenate(list(xs), axis=axis), - f_kwargs=lambda axis: dict(axis=axis+1), - axis=first_axis - ) + bottom_index, arr_ndim = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + return _block(arrays, list_ndim, max(arr_ndim, list_ndim)) diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c index d1bce8c3b..74fbb88c2 100644 --- a/numpy/core/src/multiarray/array_assign_array.c +++ b/numpy/core/src/multiarray/array_assign_array.c @@ -346,6 +346,21 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, } } + /* optimization: scalar boolean mask */ + if (wheremask != NULL && + PyArray_NDIM(wheremask) == 0 && + PyArray_DESCR(wheremask)->type_num == NPY_BOOL) { + npy_bool value = *(npy_bool *)PyArray_DATA(wheremask); + if (value) { + /* where=True is the same as no where at all */ + wheremask = NULL; + } + else { + /* where=False copies nothing */ + return 0; + } + } + if (wheremask == NULL) { /* A straightforward value assignment */ /* Do the assignment with raw array iteration */ diff --git 
a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c index 7c1b1f16a..3d259ae05 100644 --- a/numpy/core/src/multiarray/array_assign_scalar.c +++ b/numpy/core/src/multiarray/array_assign_scalar.c @@ -233,7 +233,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, * Use a static buffer to store the aligned/cast version, * or allocate some memory if more space is needed. */ - if (sizeof(scalarbuffer) >= PyArray_DESCR(dst)->elsize) { + if ((int)sizeof(scalarbuffer) >= PyArray_DESCR(dst)->elsize) { tmp_src_data = (char *)&scalarbuffer[0]; } else { diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 1d4816d96..0aaf27b27 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -76,7 +76,7 @@ PyArray_Size(PyObject *op) * Precondition: 'arr' is a copy of 'base' (though possibly with different * strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the * ->base pointer on 'arr', so that when 'arr' is destructed, it will copy any - * changes back to 'base'. + * changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase * * Steals a reference to 'base'. * @@ -85,17 +85,59 @@ PyArray_Size(PyObject *op) NPY_NO_EXPORT int PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) { + int ret; +#ifdef PYPY_VERSION + #ifndef DEPRECATE_UPDATEIFCOPY + #define DEPRECATE_UPDATEIFCOPY + #endif +#endif + +#ifdef DEPRECATE_UPDATEIFCOPY + /* TODO: enable this once a solution for UPDATEIFCOPY + * and nditer are resolved, also pending the fix for GH7054 + */ + /* 2017-Nov-10 1.14 */ + if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use " + "PyArray_SetWritebackIfCopyBase instead, and be sure to call " + "PyArray_ResolveWritebackIfCopy before the array is deallocated, " + "i.e. before the last call to Py_DECREF. 
If cleaning up from an " + "error, PyArray_DiscardWritebackIfCopy may be called instead to " + "throw away the scratch buffer.") < 0) + return -1; +#endif + ret = PyArray_SetWritebackIfCopyBase(arr, base); + if (ret >=0) { + PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + } + return ret; +} + +/*NUMPY_API + * + * Precondition: 'arr' is a copy of 'base' (though possibly with different + * strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the + * ->base pointer on 'arr', call PyArray_ResolveWritebackIfCopy to copy any + * changes back to 'base' before deallocating the array. + * + * Steals a reference to 'base'. + * + * Returns 0 on success, -1 on failure. + */ +NPY_NO_EXPORT int +PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject *base) +{ if (base == NULL) { PyErr_SetString(PyExc_ValueError, - "Cannot UPDATEIFCOPY to NULL array"); + "Cannot WRITEBACKIFCOPY to NULL array"); return -1; } if (PyArray_BASE(arr) != NULL) { PyErr_SetString(PyExc_ValueError, - "Cannot set array with existing base to UPDATEIFCOPY"); + "Cannot set array with existing base to WRITEBACKIFCOPY"); goto fail; } - if (PyArray_FailUnlessWriteable(base, "UPDATEIFCOPY base") < 0) { + if (PyArray_FailUnlessWriteable(base, "WRITEBACKIFCOPY base") < 0) { goto fail; } @@ -112,7 +154,7 @@ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) * references. */ ((PyArrayObject_fields *)arr)->base = (PyObject *)base; - PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(base, NPY_ARRAY_WRITEABLE); return 0; @@ -370,6 +412,45 @@ PyArray_TypeNumFromName(char *str) return NPY_NOTYPE; } +/*NUMPY_API + * + * If WRITEBACKIFCOPY and self has data, reset the base WRITEABLE flag, + * copy the local data to base, release the local data, and set flags + * appropriately. 
Return 0 if not relevant, 1 if success, < 0 on failure + */ +NPY_NO_EXPORT int +PyArray_ResolveWritebackIfCopy(PyArrayObject * self) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)self; + if (fa && fa->base) { + if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + /* + * UPDATEIFCOPY or WRITEBACKIFCOPY means that fa->base's data + * should be updated with the contents + * of self. + * fa->base->flags is not WRITEABLE to protect the relationship + * unlock it. + */ + int retval = 0; + PyArray_ENABLEFLAGS(((PyArrayObject *)fa->base), + NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); + retval = PyArray_CopyAnyInto((PyArrayObject *)fa->base, self); + Py_DECREF(fa->base); + fa->base = NULL; + if (retval < 0) { + /* this should never happen, how did the two copies of data + * get out of sync? + */ + return retval; + } + return 1; + } + } + return 0; +} + /*********************** end C-API functions **********************/ /* array object functions */ @@ -385,32 +466,45 @@ array_dealloc(PyArrayObject *self) PyObject_ClearWeakRefs((PyObject *)self); } if (fa->base) { - /* - * UPDATEIFCOPY means that base points to an - * array that should be updated with the contents - * of this array upon destruction. - * fa->base->flags must have been WRITEABLE - * (checked previously) and it was locked here - * thus, unlock it. 
- */ - if (fa->flags & NPY_ARRAY_UPDATEIFCOPY) { - PyArray_ENABLEFLAGS(((PyArrayObject *)fa->base), - NPY_ARRAY_WRITEABLE); - Py_INCREF(self); /* hold on to self in next call */ - if (PyArray_CopyAnyInto((PyArrayObject *)fa->base, self) < 0) { + int retval; + if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) + { + char * msg = "WRITEBACKIFCOPY requires a call to " + "PyArray_ResolveWritebackIfCopy or " + "PyArray_DiscardWritebackIfCopy before array_dealloc is " + "called."; + /* 2017-Nov-10 1.14 */ + if (DEPRECATE(msg) < 0) { + /* dealloc cannot raise an error, best effort try to write + to stderr and clear the error + */ + PyErr_WriteUnraisable((PyObject *)&PyArray_Type); + } + retval = PyArray_ResolveWritebackIfCopy(self); + if (retval < 0) + { + PyErr_Print(); + PyErr_Clear(); + } + } + if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) { + /* DEPRECATED, remove once the flag is removed */ + Py_INCREF(self); /* hold on to self in next call since if + * refcount == 0 it will recurse back into + *array_dealloc + */ + retval = PyArray_ResolveWritebackIfCopy(self); + if (retval < 0) + { PyErr_Print(); PyErr_Clear(); } - /* - * Don't need to DECREF -- because we are deleting - *self already... 
- */ } /* * In any case base is pointing to something that we need * to DECREF -- either a view or a buffer object */ - Py_DECREF(fa->base); + Py_XDECREF(fa->base); } if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) { @@ -482,6 +576,8 @@ PyArray_DebugPrint(PyArrayObject *obj) printf(" NPY_WRITEABLE"); if (fobj->flags & NPY_ARRAY_UPDATEIFCOPY) printf(" NPY_UPDATEIFCOPY"); + if (fobj->flags & NPY_ARRAY_WRITEBACKIFCOPY) + printf(" NPY_WRITEBACKIFCOPY"); printf("\n"); if (fobj->base != NULL && PyArray_Check(fobj->base)) { @@ -505,8 +601,6 @@ PyArray_SetDatetimeParseFunction(PyObject *op) { } - - /*NUMPY_API */ NPY_NO_EXPORT int @@ -1111,6 +1205,56 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op) } } +/* This is a copy of _PyErr_ChainExceptions, with: + * - a minimal implementation for python 2 + * - __cause__ used instead of __context__ + */ +NPY_NO_EXPORT void +PyArray_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* Silence the current error and emit a deprecation warning instead. + * + * If warnings are raised as errors, this sets the warning __cause__ to the + * silenced error. 
+ */ +NPY_NO_EXPORT int +DEPRECATE_silence_error(const char *msg) { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + if (DEPRECATE(msg) < 0) { + PyArray_ChainExceptionsCause(exc, val, tb); + return -1; + } + Py_XDECREF(exc); + Py_XDECREF(val); + Py_XDECREF(tb); + return 0; +} + NPY_NO_EXPORT PyObject * array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) { @@ -1174,8 +1318,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) */ if (array_other == NULL) { /* 2015-05-07, 1.10 */ - PyErr_Clear(); - if (DEPRECATE( + if (DEPRECATE_silence_error( "elementwise == comparison failed and returning scalar " "instead; this will raise an error in the future.") < 0) { return NULL; @@ -1220,9 +1363,9 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) * is not possible. */ /* 2015-05-14, 1.10 */ - PyErr_Clear(); - if (DEPRECATE("elementwise == comparison failed; " - "this will raise an error in the future.") < 0) { + if (DEPRECATE_silence_error( + "elementwise == comparison failed; " + "this will raise an error in the future.") < 0) { return NULL; } @@ -1247,8 +1390,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) */ if (array_other == NULL) { /* 2015-05-07, 1.10 */ - PyErr_Clear(); - if (DEPRECATE( + if (DEPRECATE_silence_error( "elementwise != comparison failed and returning scalar " "instead; this will raise an error in the future.") < 0) { return NULL; @@ -1287,9 +1429,9 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) * is not possible. 
*/ /* 2015-05-14, 1.10 */ - PyErr_Clear(); - if (DEPRECATE("elementwise != comparison failed; " - "this will raise an error in the future.") < 0) { + if (DEPRECATE_silence_error( + "elementwise != comparison failed; " + "this will raise an error in the future.") < 0) { return NULL; } diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index bfc39ed97..e8aa19416 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -24,6 +24,7 @@ #include "_datetime.h" #include "arrayobject.h" #include "alloc.h" +#include "typeinfo.h" #ifdef NPY_HAVE_SSE2_INTRINSICS #include <emmintrin.h> #endif @@ -730,6 +731,21 @@ VOID_getitem(void *input, void *vap) return (PyObject *)ret; } + /* 2017-11-26, 1.14 */ + if (DEPRECATE_FUTUREWARNING( + "the `.item()` method of unstructured void types will return an " + "immutable `bytes` object in the near future, the same as " + "returned by `bytes(void_obj)`, instead of the mutable memoryview " + "or integer array returned in numpy 1.13.") < 0) { + return NULL; + } + /* + * In the future all the code below will be replaced by + * + * For unstructured void types like V4, return a bytes object (copy). 
+ * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize); + */ + if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, @@ -1683,7 +1699,7 @@ static void int skip = 1; int oskip = PyArray_DESCR(aop)->elsize; for (i = 0; i < n; i++, ip += skip, op += oskip) { - temp = @from@_getitem(ip, aip); + temp = PyArray_Scalar(ip, PyArray_DESCR(aip), (PyObject *)aip); if (temp == NULL) { Py_INCREF(Py_False); temp = Py_False; @@ -4805,21 +4821,19 @@ set_typeinfo(PyObject *dict) * #cn = i*7, N, i, l, i, N, i# */ - PyDict_SetItemString(infodict, "@name@", -#if defined(NPY_PY3K) - s = Py_BuildValue("Ciii@cx@@cn@O", -#else - s = Py_BuildValue("ciii@cx@@cn@O", -#endif - NPY_@name@LTR, - NPY_@name@, - NPY_BITSOF_@uname@, - _ALIGN(@type@), - @max@, - @min@, - (PyObject *) &Py@Name@ArrType_Type)); + s = PyArray_typeinforanged( + NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@uname@, _ALIGN(@type@), + Py_BuildValue("@cx@", @max@), + Py_BuildValue("@cn@", @min@), + &Py@Name@ArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "@name@", s); Py_DECREF(s); + /**end repeat**/ @@ -4833,91 +4847,80 @@ set_typeinfo(PyObject *dict) * CFloat, CDouble, CLongDouble# * #num = 1, 1, 1, 1, 2, 2, 2# */ - - PyDict_SetItemString(infodict, "@name@", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", NPY_@name@LTR, -#else - s = Py_BuildValue("ciiiO", NPY_@name@LTR, -#endif - NPY_@name@, - NPY_BITSOF_@name@, - @num@ * _ALIGN(@type@) > NPY_MAX_COPY_ALIGNMENT ? - NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@type@), - (PyObject *) &Py@Name@ArrType_Type)); + s = PyArray_typeinfo( + NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@name@, + @num@ * _ALIGN(@type@) > NPY_MAX_COPY_ALIGNMENT ? 
+ NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@type@), + &Py@Name@ArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "@name@", s); Py_DECREF(s); /**end repeat**/ - PyDict_SetItemString(infodict, "OBJECT", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", NPY_OBJECTLTR, -#else - s = Py_BuildValue("ciiiO", NPY_OBJECTLTR, -#endif - NPY_OBJECT, - sizeof(PyObject *) * CHAR_BIT, - _ALIGN(PyObject *), - (PyObject *) &PyObjectArrType_Type)); + s = PyArray_typeinfo( + NPY_OBJECTLTR, NPY_OBJECT, sizeof(PyObject *) * CHAR_BIT, + _ALIGN(PyObject *), + &PyObjectArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "OBJECT", s); Py_DECREF(s); - PyDict_SetItemString(infodict, "STRING", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", NPY_STRINGLTR, -#else - s = Py_BuildValue("ciiiO", NPY_STRINGLTR, -#endif - NPY_STRING, - 0, - _ALIGN(char), - (PyObject *) &PyStringArrType_Type)); + s = PyArray_typeinfo( + NPY_STRINGLTR, NPY_STRING, 0, _ALIGN(char), + &PyStringArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "STRING", s); Py_DECREF(s); - PyDict_SetItemString(infodict, "UNICODE", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", NPY_UNICODELTR, -#else - s = Py_BuildValue("ciiiO", NPY_UNICODELTR, -#endif - NPY_UNICODE, - 0, - _ALIGN(npy_ucs4), - (PyObject *) &PyUnicodeArrType_Type)); + s = PyArray_typeinfo( + NPY_UNICODELTR, NPY_UNICODE, 0, _ALIGN(npy_ucs4), + &PyUnicodeArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "UNICODE", s); Py_DECREF(s); - PyDict_SetItemString(infodict, "VOID", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", NPY_VOIDLTR, -#else - s = Py_BuildValue("ciiiO", NPY_VOIDLTR, -#endif - NPY_VOID, - 0, - _ALIGN(char), - (PyObject *) &PyVoidArrType_Type)); + s = PyArray_typeinfo( + NPY_VOIDLTR, NPY_VOID, 0, _ALIGN(char), + &PyVoidArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, 
"VOID", s); Py_DECREF(s); - PyDict_SetItemString(infodict, "DATETIME", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiNNO", NPY_DATETIMELTR, -#else - s = Py_BuildValue("ciiiNNO", NPY_DATETIMELTR, -#endif - NPY_DATETIME, - NPY_BITSOF_DATETIME, - _ALIGN(npy_datetime), - MyPyLong_FromInt64(NPY_MAX_DATETIME), - MyPyLong_FromInt64(NPY_MIN_DATETIME), - (PyObject *) &PyDatetimeArrType_Type)); + s = PyArray_typeinforanged( + NPY_DATETIMELTR, NPY_DATETIME, NPY_BITSOF_DATETIME, + _ALIGN(npy_datetime), + MyPyLong_FromInt64(NPY_MAX_DATETIME), + MyPyLong_FromInt64(NPY_MIN_DATETIME), + &PyDatetimeArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "DATETIME", s); Py_DECREF(s); - PyDict_SetItemString(infodict, "TIMEDELTA", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiNNO", NPY_TIMEDELTALTR, -#else - s = Py_BuildValue("ciiiNNO",NPY_TIMEDELTALTR, -#endif - NPY_TIMEDELTA, - NPY_BITSOF_TIMEDELTA, - _ALIGN(npy_timedelta), - MyPyLong_FromInt64(NPY_MAX_TIMEDELTA), - MyPyLong_FromInt64(NPY_MIN_TIMEDELTA), - (PyObject *)&PyTimedeltaArrType_Type)); + s = PyArray_typeinforanged( + NPY_TIMEDELTALTR, NPY_TIMEDELTA, NPY_BITSOF_TIMEDELTA, + _ALIGN(npy_timedelta), + MyPyLong_FromInt64(NPY_MAX_TIMEDELTA), + MyPyLong_FromInt64(NPY_MIN_TIMEDELTA), + &PyTimedeltaArrType_Type + ); + if (s == NULL) { + return -1; + } + PyDict_SetItemString(infodict, "TIMEDELTA", s); Py_DECREF(s); #define SETTYPE(name) \ diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index e76d406de..4aa25a196 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -12,6 +12,7 @@ #include "npy_pycompat.h" #include "buffer.h" +#include "common.h" #include "numpyos.h" #include "arrayobject.h" @@ -243,14 +244,19 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, child = (PyArray_Descr*)PyTuple_GetItem(item, 0); offset_obj = PyTuple_GetItem(item, 1); - new_offset = base_offset + PyInt_AsLong(offset_obj); + 
new_offset = PyInt_AsLong(offset_obj); + if (error_converting(new_offset)) { + return -1; + } + new_offset += base_offset; /* Insert padding manually */ if (*offset > new_offset) { - PyErr_SetString(PyExc_RuntimeError, - "This should never happen: Invalid offset in " - "buffer format string generation. Please " - "report a bug to the Numpy developers."); + PyErr_SetString( + PyExc_ValueError, + "dtypes with overlapping or out-of-order fields are not " + "representable as buffers. Consider reordering the fields." + ); return -1; } while (*offset < new_offset) { @@ -828,6 +834,7 @@ _descriptor_from_pep3118_format(char *s) /* Strip whitespace, except from field names */ buf = malloc(strlen(s) + 1); if (buf == NULL) { + PyErr_NoMemory(); return NULL; } p = buf; diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 379e5c3d2..e24ac2b57 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -118,7 +118,7 @@ PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) } rp = (PyArrayObject *)PyArray_FromArray(out, PyArray_DescrFromType(NPY_INTP), - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (rp == NULL) { goto fail; } @@ -134,8 +134,9 @@ PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) NPY_END_THREADS_DESCR(PyArray_DESCR(ap)); Py_DECREF(ap); - /* Trigger the UPDATEIFCOPY if necessary */ + /* Trigger the UPDATEIFCOPY/WRITEBACKIFCOPY if necessary */ if (out != NULL && out != rp) { + PyArray_ResolveWritebackIfCopy(rp); Py_DECREF(rp); rp = out; Py_INCREF(rp); @@ -233,7 +234,7 @@ PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) } rp = (PyArrayObject *)PyArray_FromArray(out, PyArray_DescrFromType(NPY_INTP), - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (rp == NULL) { goto fail; } @@ -249,8 +250,9 @@ PyArray_ArgMin(PyArrayObject *op, int axis, 
PyArrayObject *out) NPY_END_THREADS_DESCR(PyArray_DESCR(ap)); Py_DECREF(ap); - /* Trigger the UPDATEIFCOPY if necessary */ + /* Trigger the UPDATEIFCOPY/WRITEBACKIFCOPY if necessary */ if (out != NULL && out != rp) { + PyArray_ResolveWritebackIfCopy(rp); Py_DECREF(rp); rp = out; Py_INCREF(rp); @@ -1117,7 +1119,7 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o oflags = NPY_ARRAY_FARRAY; else oflags = NPY_ARRAY_CARRAY; - oflags |= NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_FORCECAST; + oflags |= NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST; Py_INCREF(indescr); newout = (PyArrayObject*)PyArray_FromArray(out, indescr, oflags); if (newout == NULL) { @@ -1153,6 +1155,7 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o Py_XDECREF(maxa); Py_DECREF(newin); /* Copy back into out if out was not already a nice array. */ + PyArray_ResolveWritebackIfCopy(newout); Py_DECREF(newout); return (PyObject *)out; @@ -1162,7 +1165,8 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o Py_XDECREF(maxa); Py_XDECREF(mina); Py_XDECREF(newin); - PyArray_XDECREF_ERR(newout); + PyArray_DiscardWritebackIfCopy(newout); + Py_XDECREF(newout); return NULL; } diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index 7cb1652bb..c941bb29b 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -412,7 +412,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, /* set copy-back */ Py_INCREF(out); - if (PyArray_SetUpdateIfCopyBase(out_buf, out) < 0) { + if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { Py_DECREF(out); goto fail; } @@ -772,6 +772,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, Py_DECREF(ap2); /* Trigger possible copyback into `result` */ + PyArray_ResolveWritebackIfCopy(out_buf); Py_DECREF(out_buf); return PyArray_Return(result); diff --git 
a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 099cc0394..10efdc4c8 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -588,7 +588,7 @@ _zerofill(PyArrayObject *ret) NPY_NO_EXPORT int _IsAligned(PyArrayObject *ap) { - unsigned int i; + int i; npy_uintp aligned; npy_uintp alignment = PyArray_DESCR(ap)->alignment; diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 95b1d241a..14f4a8f65 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -339,7 +339,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) } array = (PyArrayObject *)PyArray_FromArray((PyArrayObject *)array0, NULL, - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (array == NULL) { goto fail; } @@ -414,6 +414,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) Py_XDECREF(values); Py_XDECREF(mask); + PyArray_ResolveWritebackIfCopy(array); Py_DECREF(array); Py_RETURN_NONE; @@ -1439,7 +1440,10 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) return NULL; } - docstr = PyBytes_AS_STRING(PyUnicode_AsUTF8String(str)); + docstr = PyUnicode_AsUTF8(str); + if (docstr == NULL) { + return NULL; + } #else if (!PyArg_ParseTuple(args, "OO!:add_docstring", &obj, &PyString_Type, &str)) { return NULL; diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 9927ffb6f..0d79f294c 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -135,7 +135,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) * Usually, if data_obj is not an array, dtype should be the result * given by the PyArray_GetArrayParamsFromObject function. * - * The data_obj may be NULL if just a dtype is is known for the source. 
+ * The data_obj may be NULL if just a dtype is known for the source. * * If *flex_dtype is NULL, returns immediately, without setting an * exception. This basically assumes an error was already set previously. @@ -1002,6 +1002,17 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) { int type_num1, type_num2, ret_type_num; + /* + * Fast path for identical dtypes. + * + * Non-native-byte-order types are converted to native ones below, so we + * can't quit early. + */ + if (type1 == type2 && PyArray_ISNBO(type1->byteorder)) { + Py_INCREF(type1); + return type1; + } + type_num1 = type1->type_num; type_num2 = type2->type_num; @@ -1294,6 +1305,34 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) } /* + * Produces the smallest size and lowest kind type to which all + * input types can be cast. + * + * Equivalent to functools.reduce(PyArray_PromoteTypes, types) + */ +NPY_NO_EXPORT PyArray_Descr * +PyArray_PromoteTypeSequence(PyArray_Descr **types, npy_intp ntypes) +{ + npy_intp i; + PyArray_Descr *ret = NULL; + if (ntypes == 0) { + PyErr_SetString(PyExc_TypeError, "at least one type needed to promote"); + return NULL; + } + ret = types[0]; + Py_INCREF(ret); + for (i = 1; i < ntypes; ++i) { + PyArray_Descr *tmp = PyArray_PromoteTypes(types[i], ret); + Py_DECREF(ret); + ret = tmp; + if (ret == NULL) { + return NULL; + } + } + return ret; +} + +/* * NOTE: While this is unlikely to be a performance problem, if * it is it could be reverted to a simple positive/negative * check as the previous system used. @@ -1579,16 +1618,12 @@ static int min_scalar_type_num(char *valueptr, int type_num, return type_num; } -/*NUMPY_API - * If arr is a scalar (has 0 dimensions) with a built-in number data type, - * finds the smallest type size/kind which can still represent its data. - * Otherwise, returns the array's data type. 
- * - */ + NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType(PyArrayObject *arr) +PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) { PyArray_Descr *dtype = PyArray_DESCR(arr); + *is_small_unsigned = 0; /* * If the array isn't a numeric scalar, just return the array's dtype. */ @@ -1599,18 +1634,30 @@ PyArray_MinScalarType(PyArrayObject *arr) else { char *data = PyArray_BYTES(arr); int swap = !PyArray_ISNBO(dtype->byteorder); - int is_small_unsigned = 0; /* An aligned memory buffer large enough to hold any type */ npy_longlong value[4]; dtype->f->copyswap(&value, data, swap, NULL); return PyArray_DescrFromType( min_scalar_type_num((char *)&value, - dtype->type_num, &is_small_unsigned)); + dtype->type_num, is_small_unsigned)); } } +/*NUMPY_API + * If arr is a scalar (has 0 dimensions) with a built-in number data type, + * finds the smallest type size/kind which can still represent its data. + * Otherwise, returns the array's data type. + * + */ +NPY_NO_EXPORT PyArray_Descr * +PyArray_MinScalarType(PyArrayObject *arr) +{ + int is_small_unsigned; + return PyArray_MinScalarType_internal(arr, &is_small_unsigned); +} + /* * Provides an ordering for the dtype 'kind' character codes, to help * determine when to use the min_scalar_type function. 
This groups @@ -1658,12 +1705,11 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp ndtypes, PyArray_Descr **dtypes) { npy_intp i; - int use_min_scalar = 0; - PyArray_Descr *ret = NULL, *tmpret; - int ret_is_small_unsigned = 0; + int use_min_scalar; /* If there's just one type, pass it through */ if (narrs + ndtypes == 1) { + PyArray_Descr *ret = NULL; if (narrs == 1) { ret = PyArray_DESCR(arr[0]); } @@ -1679,28 +1725,30 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, * the maximum "kind" of the scalars surpasses the maximum * "kind" of the arrays */ + use_min_scalar = 0; if (narrs > 0) { - int all_scalars, max_scalar_kind = -1, max_array_kind = -1; - int kind; + int all_scalars; + int max_scalar_kind = -1; + int max_array_kind = -1; all_scalars = (ndtypes > 0) ? 0 : 1; /* Compute the maximum "kinds" and whether everything is scalar */ for (i = 0; i < narrs; ++i) { if (PyArray_NDIM(arr[i]) == 0) { - kind = dtype_kind_to_simplified_ordering( + int kind = dtype_kind_to_simplified_ordering( PyArray_DESCR(arr[i])->kind); if (kind > max_scalar_kind) { max_scalar_kind = kind; } } else { - all_scalars = 0; - kind = dtype_kind_to_simplified_ordering( + int kind = dtype_kind_to_simplified_ordering( PyArray_DESCR(arr[i])->kind); if (kind > max_array_kind) { max_array_kind = kind; } + all_scalars = 0; } } /* @@ -1708,7 +1756,7 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, * finish computing the max array kind */ for (i = 0; i < ndtypes; ++i) { - kind = dtype_kind_to_simplified_ordering(dtypes[i]->kind); + int kind = dtype_kind_to_simplified_ordering(dtypes[i]->kind); if (kind > max_array_kind) { max_array_kind = kind; } @@ -1722,75 +1770,36 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, /* Loop through all the types, promoting them */ if (!use_min_scalar) { + PyArray_Descr *ret; + + /* Build a single array of all the dtypes */ + PyArray_Descr **all_dtypes = PyArray_malloc( + sizeof(*all_dtypes) * (narrs + ndtypes)); + if 
(all_dtypes == NULL) { + PyErr_NoMemory(); + return NULL; + } for (i = 0; i < narrs; ++i) { - PyArray_Descr *tmp = PyArray_DESCR(arr[i]); - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - /* Only call promote if the types aren't the same dtype */ - if (tmp != ret || !PyArray_ISNBO(ret->byteorder)) { - tmpret = PyArray_PromoteTypes(tmp, ret); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return NULL; - } - } - } + all_dtypes[i] = PyArray_DESCR(arr[i]); } - for (i = 0; i < ndtypes; ++i) { - PyArray_Descr *tmp = dtypes[i]; - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - /* Only call promote if the types aren't the same dtype */ - if (tmp != ret || !PyArray_ISNBO(tmp->byteorder)) { - tmpret = PyArray_PromoteTypes(tmp, ret); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return NULL; - } - } - } + all_dtypes[narrs + i] = dtypes[i]; } + ret = PyArray_PromoteTypeSequence(all_dtypes, narrs + ndtypes); + PyArray_free(all_dtypes); + return ret; } else { + int ret_is_small_unsigned = 0; + PyArray_Descr *ret = NULL; + for (i = 0; i < narrs; ++i) { - /* Get the min scalar type for the array */ - PyArray_Descr *tmp = PyArray_DESCR(arr[i]); - int tmp_is_small_unsigned = 0; - /* - * If it's a scalar, find the min scalar type. The function - * is expanded here so that we can flag whether we've got an - * unsigned integer which would fit an a signed integer - * of the same size, something not exposed in the public API. 
- */ - if (PyArray_NDIM(arr[i]) == 0 && - PyTypeNum_ISNUMBER(tmp->type_num)) { - char *data = PyArray_BYTES(arr[i]); - int swap = !PyArray_ISNBO(tmp->byteorder); - int type_num; - /* An aligned memory buffer large enough to hold any type */ - npy_longlong value[4]; - tmp->f->copyswap(&value, data, swap, NULL); - type_num = min_scalar_type_num((char *)&value, - tmp->type_num, &tmp_is_small_unsigned); - tmp = PyArray_DescrFromType(type_num); - if (tmp == NULL) { - Py_XDECREF(ret); - return NULL; - } - } - else { - Py_INCREF(tmp); + int tmp_is_small_unsigned; + PyArray_Descr *tmp = PyArray_MinScalarType_internal( + arr[i], &tmp_is_small_unsigned); + if (tmp == NULL) { + Py_XDECREF(ret); + return NULL; } /* Combine it with the existing type */ if (ret == NULL) { @@ -1798,30 +1807,15 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, ret_is_small_unsigned = tmp_is_small_unsigned; } else { -#if 0 - printf("promoting type "); - PyObject_Print(tmp, stdout, 0); - printf(" (%d) ", tmp_is_small_unsigned); - PyObject_Print(ret, stdout, 0); - printf(" (%d) ", ret_is_small_unsigned); - printf("\n"); -#endif - /* If they point to the same type, don't call promote */ - if (tmp == ret && PyArray_ISNBO(tmp->byteorder)) { - Py_DECREF(tmp); - } - else { - tmpret = promote_types(tmp, ret, tmp_is_small_unsigned, - ret_is_small_unsigned); - if (tmpret == NULL) { - Py_DECREF(tmp); - Py_DECREF(ret); - return NULL; - } - Py_DECREF(tmp); - Py_DECREF(ret); - ret = tmpret; + PyArray_Descr *tmpret = promote_types( + tmp, ret, tmp_is_small_unsigned, ret_is_small_unsigned); + Py_DECREF(tmp); + Py_DECREF(ret); + ret = tmpret; + if (ret == NULL) { + return NULL; } + ret_is_small_unsigned = tmp_is_small_unsigned && ret_is_small_unsigned; } @@ -1835,36 +1829,23 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, Py_INCREF(ret); } else { - /* Only call promote if the types aren't the same dtype */ - if (tmp != ret || !PyArray_ISNBO(tmp->byteorder)) { - if (ret_is_small_unsigned) { - 
tmpret = promote_types(tmp, ret, 0, - ret_is_small_unsigned); - if (tmpret == NULL) { - Py_DECREF(tmp); - Py_DECREF(ret); - return NULL; - } - } - else { - tmpret = PyArray_PromoteTypes(tmp, ret); - } - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return NULL; - } + PyArray_Descr *tmpret = promote_types( + tmp, ret, 0, ret_is_small_unsigned); + Py_DECREF(ret); + ret = tmpret; + if (ret == NULL) { + return NULL; } } } - } + /* None of the above loops ran */ + if (ret == NULL) { + PyErr_SetString(PyExc_TypeError, + "no arrays or types available to calculate result type"); + } - if (ret == NULL) { - PyErr_SetString(PyExc_TypeError, - "no arrays or types available to calculate result type"); + return ret; } - - return ret; } /*NUMPY_API diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index d31a9cf74..3d6b161b1 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1009,7 +1009,8 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd, } } else { - fa->flags = (flags & ~NPY_ARRAY_UPDATEIFCOPY); + fa->flags = (flags & ~NPY_ARRAY_WRITEBACKIFCOPY); + fa->flags = (fa->flags & ~NPY_ARRAY_UPDATEIFCOPY); } fa->descr = descr; fa->base = (PyObject *)NULL; @@ -1703,10 +1704,11 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, /* If we got dimensions and dtype instead of an array */ if (arr == NULL) { - if (flags & NPY_ARRAY_UPDATEIFCOPY) { + if ((flags & NPY_ARRAY_WRITEBACKIFCOPY) || + (flags & NPY_ARRAY_UPDATEIFCOPY)) { Py_XDECREF(newtype); PyErr_SetString(PyExc_TypeError, - "UPDATEIFCOPY used for non-array input."); + "WRITEBACKIFCOPY used for non-array input."); return NULL; } else if (min_depth != 0 && ndim < min_depth) { @@ -1810,6 +1812,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, * NPY_ARRAY_NOTSWAPPED, * NPY_ARRAY_ENSURECOPY, * NPY_ARRAY_UPDATEIFCOPY, + * NPY_ARRAY_WRITEBACKIFCOPY, * NPY_ARRAY_FORCECAST, * 
NPY_ARRAY_ENSUREARRAY, * NPY_ARRAY_ELEMENTSTRIDES @@ -1834,10 +1837,13 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, * Fortran arrays are always behaved (aligned, * notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). * - * NPY_ARRAY_UPDATEIFCOPY flag sets this flag in the returned array if a copy - * is made and the base argument points to the (possibly) misbehaved array. - * When the new array is deallocated, the original array held in base - * is updated with the contents of the new array. + * NPY_ARRAY_UPDATEIFCOPY is deprecated in favor of + * NPY_ARRAY_WRITEBACKIFCOPY in 1.14 + + * NPY_ARRAY_WRITEBACKIFCOPY flag sets this flag in the returned + * array if a copy is made and the base argument points to the (possibly) + * misbehaved array. Before returning to python, PyArray_ResolveWritebackIfCopy + * must be called to update the contents of the original array from the copy. * * NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not * it is safe. @@ -2001,9 +2007,30 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) return NULL; } - if (flags & NPY_ARRAY_UPDATEIFCOPY) { + if (flags & NPY_ARRAY_UPDATEIFCOPY) { + /* This is the ONLY place the NPY_ARRAY_UPDATEIFCOPY flag + * is still used. + * Can be deleted once the flag itself is removed + */ + + /* 2017-Nov-10 1.14 */ + if (DEPRECATE("NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and " + "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, " + "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively " + "instead, and call PyArray_ResolveWritebackIfCopy before the " + "array is deallocated, i.e. 
before the last call to Py_DECREF.") < 0) + return NULL; + Py_INCREF(arr); + if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) { + Py_DECREF(ret); + return NULL; + } + PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY); + PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEBACKIFCOPY); + } + else if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { Py_INCREF(arr); - if (PyArray_SetUpdateIfCopyBase(ret, arr) < 0) { + if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) { Py_DECREF(ret); return NULL; } @@ -2154,7 +2181,6 @@ _is_default_descr(PyObject *descr, PyObject *typestr) { NPY_NO_EXPORT PyObject * PyArray_FromInterface(PyObject *origin) { - PyObject *tmp = NULL; PyObject *iface = NULL; PyObject *attr = NULL; PyObject *base = NULL; @@ -2189,9 +2215,15 @@ PyArray_FromInterface(PyObject *origin) #if defined(NPY_PY3K) /* Allow unicode type strings */ if (PyUnicode_Check(attr)) { - tmp = PyUnicode_AsASCIIString(attr); + PyObject *tmp = PyUnicode_AsASCIIString(attr); + if (tmp == NULL) { + goto fail; + } attr = tmp; } + else { + Py_INCREF(attr); + } #endif if (!PyBytes_Check(attr)) { PyErr_SetString(PyExc_TypeError, @@ -2200,11 +2232,6 @@ PyArray_FromInterface(PyObject *origin) } /* Get dtype from type string */ dtype = _array_typedescr_fromstr(PyString_AS_STRING(attr)); -#if defined(NPY_PY3K) - if (tmp == attr) { - Py_DECREF(tmp); - } -#endif if (dtype == NULL) { goto fail; } @@ -2224,6 +2251,10 @@ PyArray_FromInterface(PyObject *origin) dtype = new_dtype; } } + +#if defined(NPY_PY3K) + Py_DECREF(attr); /* Pairs with the unicode handling above */ +#endif /* Get shape tuple from interface specification */ attr = PyDict_GetItemString(iface, "shape"); @@ -2251,7 +2282,7 @@ PyArray_FromInterface(PyObject *origin) else { n = PyTuple_GET_SIZE(attr); for (i = 0; i < n; i++) { - tmp = PyTuple_GET_ITEM(attr, i); + PyObject *tmp = PyTuple_GET_ITEM(attr, i); dims[i] = PyArray_PyIntAsIntp(tmp); if (error_converting(dims[i])) { goto fail; @@ -2368,7 +2399,7 @@ PyArray_FromInterface(PyObject *origin) goto 
fail; } for (i = 0; i < n; i++) { - tmp = PyTuple_GET_ITEM(attr, i); + PyObject *tmp = PyTuple_GET_ITEM(attr, i); strides[i] = PyArray_PyIntAsIntp(tmp); if (error_converting(strides[i])) { Py_DECREF(ret); @@ -2908,17 +2939,25 @@ PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order) * Return 0 on success, -1 on failure. In case of failure, set a PyExc_Overflow * exception */ -static int _safe_ceil_to_intp(double value, npy_intp* ret) +static npy_intp +_arange_safe_ceil_to_intp(double value) { double ivalue; ivalue = npy_ceil(value); - if (ivalue < NPY_MIN_INTP || ivalue > NPY_MAX_INTP) { + /* condition inverted to handle NaN */ + if (npy_isnan(ivalue)) { + PyErr_SetString(PyExc_ValueError, + "arange: cannot compute length"); + return -1; + } + if (!(NPY_MIN_INTP <= ivalue && ivalue <= NPY_MAX_INTP)) { + PyErr_SetString(PyExc_OverflowError, + "arange: overflow while computing length"); return -1; } - *ret = (npy_intp)ivalue; - return 0; + return (npy_intp)ivalue; } @@ -2935,9 +2974,9 @@ PyArray_Arange(double start, double stop, double step, int type_num) int ret; NPY_BEGIN_THREADS_DEF; - if (_safe_ceil_to_intp((stop - start)/step, &length)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); + length = _arange_safe_ceil_to_intp((stop - start)/step); + if (error_converting(length)) { + return NULL; } if (length <= 0) { @@ -3026,10 +3065,9 @@ _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, i Py_DECREF(val); return -1; } - if (_safe_ceil_to_intp(value, &len)) { + len = _arange_safe_ceil_to_intp(value); + if (error_converting(len)) { Py_DECREF(val); - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); return -1; } value = PyComplex_ImagAsDouble(val); @@ -3037,9 +3075,8 @@ _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, i if (error_converting(value)) { return -1; } - if (_safe_ceil_to_intp(value, &tmp)) { - 
PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); + tmp = _arange_safe_ceil_to_intp(value); + if (error_converting(tmp)) { return -1; } len = PyArray_MIN(len, tmp); @@ -3050,9 +3087,8 @@ _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, i if (error_converting(value)) { return -1; } - if (_safe_ceil_to_intp(value, &len)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); + len = _arange_safe_ceil_to_intp(value); + if (error_converting(len)) { return -1; } } diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 93babe8bd..a4a028ad4 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -778,8 +778,9 @@ parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len, goto bad_input; } out_meta->base = parse_datetime_unit_from_string(substr, - substrend-substr, metastr); - if (out_meta->base == -1) { + substrend - substr, + metastr); + if (out_meta->base == NPY_FR_ERROR ) { return -1; } substr = substrend; @@ -1073,12 +1074,13 @@ static npy_uint64 get_datetime_units_factor(NPY_DATETIMEUNIT bigbase, NPY_DATETIMEUNIT littlebase) { npy_uint64 factor = 1; - int unit = (int)bigbase; - while (littlebase > unit) { + NPY_DATETIMEUNIT unit = bigbase; + + while (unit < littlebase) { factor *= _datetime_factors[unit]; /* * Detect overflow by disallowing the top 16 bits to be 1. - * That alows a margin of error much bigger than any of + * That allows a margin of error much bigger than any of * the datetime factors. */ if (factor&0xff00000000000000ULL) { @@ -1719,7 +1721,7 @@ datetime_type_promotion(PyArray_Descr *type1, PyArray_Descr *type2) * a date time unit enum value. The 'metastr' parameter * is used for error messages, and may be NULL. * - * Returns 0 on success, -1 on failure. + * Returns NPY_DATETIMEUNIT on success, NPY_FR_ERROR on failure. 
*/ NPY_NO_EXPORT NPY_DATETIMEUNIT parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr) @@ -1775,7 +1777,7 @@ parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr) "Invalid datetime unit in metadata string \"%s\"", metastr); } - return -1; + return NPY_FR_ERROR; } @@ -1847,7 +1849,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, } out_meta->base = parse_datetime_unit_from_string(basestr, len, NULL); - if (out_meta->base == -1) { + if (out_meta->base == NPY_FR_ERROR) { Py_DECREF(unit_str); return -1; } @@ -2150,7 +2152,7 @@ add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes) * to UTC time, otherwise it returns the struct with the local time. * * Returns -1 on error, 0 on success, and 1 (with no error set) - * if obj doesn't have the neeeded date or datetime attributes. + * if obj doesn't have the needed date or datetime attributes. */ NPY_NO_EXPORT int convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out, @@ -2418,7 +2420,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, char *str = NULL; Py_ssize_t len = 0; npy_datetimestruct dts; - NPY_DATETIMEUNIT bestunit = -1; + NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR; /* Convert to an ASCII string for the date parser */ if (PyUnicode_Check(obj)) { @@ -2444,7 +2446,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, } /* Use the detected unit if none was specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = bestunit; meta->num = 1; } @@ -2460,7 +2462,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, /* Do no conversion on raw integers */ else if (PyInt_Check(obj) || PyLong_Check(obj)) { /* Don't allow conversion from an integer without specifying a unit */ - if (meta->base == -1 || meta->base == NPY_FR_GENERIC) { + if (meta->base == NPY_FR_ERROR || meta->base == NPY_FR_GENERIC) { 
PyErr_SetString(PyExc_ValueError, "Converting an integer to a " "NumPy datetime requires a specified unit"); return -1; @@ -2473,7 +2475,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, PyDatetimeScalarObject *dts = (PyDatetimeScalarObject *)obj; /* Copy the scalar directly if units weren't specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { *meta = dts->obmeta; *out = dts->obval; @@ -2512,7 +2514,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, obj); /* Copy the value directly if units weren't specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { *meta = *arr_meta; *out = dt; @@ -2536,7 +2538,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, else { int code; npy_datetimestruct dts; - NPY_DATETIMEUNIT bestunit = -1; + NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR; code = convert_pydatetime_to_datetimestruct(obj, &dts, &bestunit, 1); if (code == -1) { @@ -2544,7 +2546,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, } else if (code == 0) { /* Use the detected unit if none was specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = bestunit; meta->num = 1; } @@ -2571,7 +2573,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, */ if (casting == NPY_UNSAFE_CASTING || (obj == Py_None && casting == NPY_SAME_KIND_CASTING)) { - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = NPY_FR_GENERIC; meta->num = 1; } @@ -2647,7 +2649,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, if (succeeded) { /* Use generic units if none was specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = NPY_FR_GENERIC; meta->num = 1; } @@ -2658,7 +2660,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* Do no conversion on raw integers */ else if 
(PyInt_Check(obj) || PyLong_Check(obj)) { /* Use the default unit if none was specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = NPY_DATETIME_DEFAULTUNIT; meta->num = 1; } @@ -2671,7 +2673,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, PyTimedeltaScalarObject *dts = (PyTimedeltaScalarObject *)obj; /* Copy the scalar directly if units weren't specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { *meta = dts->obmeta; *out = dts->obval; @@ -2710,7 +2712,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, obj); /* Copy the value directly if units weren't specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { *meta = *arr_meta; *out = dt; @@ -2779,7 +2781,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, td = days*(24*60*60*1000000LL) + seconds*1000000LL + useconds; /* Use microseconds if none was specified */ - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = NPY_FR_us; meta->num = 1; @@ -2833,7 +2835,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, */ if (casting == NPY_UNSAFE_CASTING || (obj == Py_None && casting == NPY_SAME_KIND_CASTING)) { - if (meta->base == -1) { + if (meta->base == NPY_FR_ERROR) { meta->base = NPY_FR_GENERIC; meta->num = 1; } @@ -3167,7 +3169,7 @@ convert_pyobjects_to_datetimes(int count, } /* Use the inputs to resolve the unit metadata if requested */ - if (inout_meta->base == -1) { + if (inout_meta->base == NPY_FR_ERROR) { /* Allocate an array of metadata corresponding to the objects */ meta = PyArray_malloc(count * sizeof(PyArray_DatetimeMetaData)); if (meta == NULL) { @@ -3177,7 +3179,7 @@ convert_pyobjects_to_datetimes(int count, /* Convert all the objects into timedeltas or datetimes */ for (i = 0; i < count; ++i) { - meta[i].base = -1; + meta[i].base = NPY_FR_ERROR; meta[i].num = 1; /* NULL -> NaT */ @@ -3344,7 
+3346,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, */ if (meta_tmp->base == NPY_FR_GENERIC) { dtype = NULL; - meta.base = -1; + meta.base = NPY_FR_ERROR; } /* Otherwise use the provided metadata */ else { @@ -3360,7 +3362,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, type_nums[0] = NPY_TIMEDELTA; } - meta.base = -1; + meta.base = NPY_FR_ERROR; } if (type_nums[0] == NPY_DATETIME && start == NULL) { @@ -3550,7 +3552,7 @@ find_string_array_datetime64_type(PyArrayObject *arr, memcpy(tmp_buffer, data, maxlen); tmp_buffer[maxlen] = '\0'; - tmp_meta.base = -1; + tmp_meta.base = NPY_FR_ERROR; if (parse_iso_8601_datetime(tmp_buffer, maxlen, -1, NPY_UNSAFE_CASTING, &dts, &tmp_meta.base, NULL) < 0) { @@ -3559,7 +3561,7 @@ find_string_array_datetime64_type(PyArrayObject *arr, } /* Otherwise parse the data in place */ else { - tmp_meta.base = -1; + tmp_meta.base = NPY_FR_ERROR; if (parse_iso_8601_datetime(data, tmp - data, -1, NPY_UNSAFE_CASTING, &dts, &tmp_meta.base, NULL) < 0) { @@ -3651,7 +3653,7 @@ recursive_find_object_datetime64_type(PyObject *obj, npy_datetime tmp = 0; PyArray_DatetimeMetaData tmp_meta; - tmp_meta.base = -1; + tmp_meta.base = NPY_FR_ERROR; tmp_meta.num = 1; if (convert_pyobject_to_datetime(&tmp_meta, obj, diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c index b9aeda508..96cb66b95 100644 --- a/numpy/core/src/multiarray/datetime_strings.c +++ b/numpy/core/src/multiarray/datetime_strings.c @@ -307,8 +307,8 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, } /* Check the casting rule */ - if (unit != -1 && !can_cast_datetime64_units(bestunit, unit, - casting)) { + if (unit != NPY_FR_ERROR && + !can_cast_datetime64_units(bestunit, unit, casting)) { PyErr_Format(PyExc_TypeError, "Cannot parse \"%s\" as unit " "'%s' using casting rule %s", str, _datetime_strings[unit], @@ -347,8 +347,8 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, } /* Check the 
casting rule */ - if (unit != -1 && !can_cast_datetime64_units(bestunit, unit, - casting)) { + if (unit != NPY_FR_ERROR && + !can_cast_datetime64_units(bestunit, unit, casting)) { PyErr_Format(PyExc_TypeError, "Cannot parse \"%s\" as unit " "'%s' using casting rule %s", str, _datetime_strings[unit], @@ -730,8 +730,8 @@ finish: } /* Check the casting rule */ - if (unit != -1 && !can_cast_datetime64_units(bestunit, unit, - casting)) { + if (unit != NPY_FR_ERROR && + !can_cast_datetime64_units(bestunit, unit, casting)) { PyErr_Format(PyExc_TypeError, "Cannot parse \"%s\" as unit " "'%s' using casting rule %s", str, _datetime_strings[unit], @@ -760,14 +760,12 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { int len = 0; - /* If no unit is provided, return the maximum length */ - if (base == -1) { - return NPY_DATETIME_MAX_ISO8601_STRLEN; - } - switch (base) { - /* Generic units can only be used to represent NaT */ + case NPY_FR_ERROR: + /* If no unit is provided, return the maximum length */ + return NPY_DATETIME_MAX_ISO8601_STRLEN; case NPY_FR_GENERIC: + /* Generic units can only be used to represent NaT */ return 4; case NPY_FR_as: len += 3; /* "###" */ @@ -928,7 +926,7 @@ make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, npy_intp outlen, } /* Automatically detect a good unit */ - if (base == -1) { + if (base == NPY_FR_ERROR) { base = lossless_unit_from_datetimestruct(dts); /* * If there's a timezone, use at least minutes precision, @@ -1406,20 +1404,24 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args, goto fail; } - /* unit == -1 means to autodetect the unit from the datetime data */ + /* + * unit == NPY_FR_ERROR means to autodetect the unit + * from the datetime data + * */ if (strcmp(str, "auto") == 0) { - unit = -1; + unit = NPY_FR_ERROR; } else { unit = parse_datetime_unit_from_string(str, len, NULL); - if (unit == -1) { + if (unit == NPY_FR_ERROR) { Py_DECREF(strobj); goto fail; } } Py_DECREF(strobj); - if 
(unit != -1 && !can_cast_datetime64_units(meta->base, unit, casting)) { + if (unit != NPY_FR_ERROR && + !can_cast_datetime64_units(meta->base, unit, casting)) { PyErr_Format(PyExc_TypeError, "Cannot create a datetime " "string as units '%s' from a NumPy datetime " "with units '%s' according to the rule %s", diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index f6b29edfe..b4a0ce37d 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -198,7 +198,7 @@ _check_for_commastring(char *type, Py_ssize_t len) * allows commas inside of [], for parameterized dtypes to use. */ sqbracket = 0; - for (i = 1; i < len; i++) { + for (i = 0; i < len; i++) { switch (type[i]) { case ',': if (sqbracket == 0) { @@ -512,11 +512,7 @@ _convert_from_array_descr(PyObject *obj, int align) } if ((PyDict_GetItem(fields, name) != NULL) || (title -#if defined(NPY_PY3K) - && PyUString_Check(title) -#else - && (PyUString_Check(title) || PyUnicode_Check(title)) -#endif + && PyBaseString_Check(title) && (PyDict_GetItem(fields, title) != NULL))) { #if defined(NPY_PY3K) name = PyUnicode_AsUTF8String(name); @@ -551,11 +547,7 @@ _convert_from_array_descr(PyObject *obj, int align) Py_INCREF(title); PyTuple_SET_ITEM(tup, 2, title); PyDict_SetItem(fields, name, tup); -#if defined(NPY_PY3K) - if (PyUString_Check(title)) { -#else - if (PyUString_Check(title) || PyUnicode_Check(title)) { -#endif + if (PyBaseString_Check(title)) { if (PyDict_GetItem(fields, title) != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); @@ -1181,11 +1173,7 @@ _convert_from_dict(PyObject *obj, int align) Py_DECREF(tup); goto fail; } -#if defined(NPY_PY3K) - if (!PyUString_Check(name)) { -#else - if (!(PyUString_Check(name) || PyUnicode_Check(name))) { -#endif + if (!PyBaseString_Check(name)) { PyErr_SetString(PyExc_ValueError, "field names must be strings"); Py_DECREF(tup); @@ -1202,11 +1190,7 @@ 
_convert_from_dict(PyObject *obj, int align) PyDict_SetItem(fields, name, tup); Py_DECREF(name); if (len == 3) { -#if defined(NPY_PY3K) - if (PyUString_Check(title)) { -#else - if (PyUString_Check(title) || PyUnicode_Check(title)) { -#endif + if (PyBaseString_Check(title)) { if (PyDict_GetItem(fields, title) != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); @@ -3356,8 +3340,8 @@ arraydescr_struct_str(PyArray_Descr *dtype, int includealignflag) sub = arraydescr_struct_dict_str(dtype, includealignflag); } - /* If the data type has a non-void (subclassed) type, show it */ - if (dtype->type_num == NPY_VOID && dtype->typeobj != &PyVoidArrType_Type) { + /* If the data type isn't the default, void, show it */ + if (dtype->typeobj != &PyVoidArrType_Type) { /* * Note: We cannot get the type name from dtype->typeobj->tp_name * because its value depends on whether the type is dynamically or @@ -3821,11 +3805,7 @@ descr_subscript(PyArray_Descr *self, PyObject *op) return NULL; } -#if defined(NPY_PY3K) - if (PyUString_Check(op)) { -#else - if (PyUString_Check(op) || PyUnicode_Check(op)) { -#endif + if (PyBaseString_Check(op)) { return _subscript_by_name(self, op); } else { diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index ab0741932..e256b0ad7 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -35,8 +35,12 @@ #include <string.h> #include <assert.h> -/* #define DEBUG_ASSERT(stmnt) assert(stmnt) */ -#define DEBUG_ASSERT(stmnt) {} + +#if 0 +#define DEBUG_ASSERT(stmnt) assert(stmnt) +#else +#define DEBUG_ASSERT(stmnt) do {} while(0) +#endif /* * Get the log base 2 of a 32-bit unsigned integer. 
@@ -892,7 +896,7 @@ BigInt_ShiftLeft(BigInt *result, npy_uint32 shift) if (shiftBits == 0) { npy_uint32 i; - /* copy blcoks from high to low */ + /* copy blocks from high to low */ for (pInCur = result->blocks + result->length, pOutCur = pInCur + shiftBlocks; pInCur >= pInBlocks; @@ -951,28 +955,6 @@ BigInt_ShiftLeft(BigInt *result, npy_uint32 shift) } } -typedef enum CutoffMode -{ - /* - * As many digits as necessary to print a uniquely identifiable number. - * cutoffNumber should be -1. - */ - CutoffMode_Unique, - /* up to cutoffNumber significant digits */ - CutoffMode_TotalLength, - /* up to cutoffNumber significant digits past the decimal point */ - CutoffMode_FractionLength, - /* - * up to cutoffNumber digits, or fewer if the number can be uniquely - * identified with fewer - */ - CutoffMode_MaxTotalUnique, - /* - * up to cutoffNumber digits pas decimal point, or fewer if the number can - * be uniquely identified with fewer - */ - CutoffMode_MaxFractionUnique, -} CutoffMode; /* * This is an implementation the Dragon4 algorithm to convert a binary number in @@ -1020,8 +1002,8 @@ typedef enum CutoffMode * * exponent - value exponent in base 2 * * mantissaBit - index of the highest set mantissa bit * * hasUnequalMargins - is the high margin twice as large as the low margin - * * cutoffMode - how to determine output length - * * cutoffNumber - parameter to the selected cutoffMode. For each mode: + * * cutoffMode - how to interpret cutoffNumber: fractional or total digits? + * * cutoffNumber - cut off printing after this many digits. 
-1 for no cutoff * * pOutBuffer - buffer to output into * * bufferSize - maximum characters that can be printed to pOutBuffer * * pOutExponent - the base 10 exponent of the first digit @@ -1029,7 +1011,8 @@ typedef enum CutoffMode static npy_uint32 Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, const npy_uint32 mantissaBit, const npy_bool hasUnequalMargins, - const CutoffMode cutoffMode, npy_int32 cutoffNumber, char *pOutBuffer, + const DigitMode digitMode, const CutoffMode cutoffMode, + npy_int32 cutoffNumber, char *pOutBuffer, npy_uint32 bufferSize, npy_int32 *pOutExponent) { char *curDigit = pOutBuffer; @@ -1053,7 +1036,7 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, BigInt optionalMarginHigh; const npy_float64 log10_2 = 0.30102999566398119521373889472449; - npy_int32 digitExponent, cutoffExponent, desiredCutoffExponent, hiBlock; + npy_int32 digitExponent, cutoffExponent, hiBlock; npy_uint32 outputDigit; /* current digit being output */ npy_uint32 outputLen; npy_bool isEven = (mantissa % 2) == 0; @@ -1187,8 +1170,7 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, * increases the number. This will either correct digitExponent to an * accurate value or it will clamp it above the accurate value. */ - if ( (cutoffMode == CutoffMode_FractionLength || - cutoffMode == CutoffMode_MaxFractionUnique) && + if (cutoffNumber >= 0 && cutoffMode == CutoffMode_FractionLength && digitExponent <= -cutoffNumber) { digitExponent = -cutoffNumber + 1; } @@ -1247,29 +1229,24 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, * Default to the maximum size of the output buffer. 
*/ cutoffExponent = digitExponent - bufferSize; - switch(cutoffMode) { - /* print digits until we pass the accuracy margin or buffer size */ - case CutoffMode_Unique: - DEBUG_ASSERT(cutoffNumber == -1); - break; - /* print cutoffNumber of digits or until we reach the buffer size */ - case CutoffMode_MaxTotalUnique: - case CutoffMode_TotalLength: + if (cutoffNumber >= 0) { + npy_int32 desiredCutoffExponent; + + if (cutoffMode == CutoffMode_TotalLength) { desiredCutoffExponent = digitExponent - cutoffNumber; if (desiredCutoffExponent > cutoffExponent) { cutoffExponent = desiredCutoffExponent; } - break; - /* print cutoffNumber digits past the decimal point or until we reach - * the buffer size + } + /* Otherwise it's CutoffMode_FractionLength. Print cutoffNumber digits + * past the decimal point or until we reach the buffer size */ - case CutoffMode_MaxFractionUnique: - case CutoffMode_FractionLength: + else { desiredCutoffExponent = -cutoffNumber; if (desiredCutoffExponent > cutoffExponent) { cutoffExponent = desiredCutoffExponent; } - break; + } } /* Output the exponent of the first digit we will print */ @@ -1311,9 +1288,7 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, } } - if (cutoffMode == CutoffMode_Unique || - cutoffMode == CutoffMode_MaxFractionUnique || - cutoffMode == CutoffMode_MaxTotalUnique) { + if (digitMode == DigitMode_Unique) { /* * For the unique cutoff mode, we will try to print until we have * reached a level of precision that uniquely distinguishes this value @@ -1358,7 +1333,7 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, } else { /* - * For length based cutoff modes, we will try to print until we + * For exact digit mode, we will try to print until we * have exhausted all precision (i.e. all remaining digits are zeros) or * until we reach the desired cutoff digit. 
*/ @@ -1406,7 +1381,7 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, /* * if we are directly in the middle, round towards the even digit (i.e. - * IEEE rouding rules) + * IEEE rounding rules) */ if (compare == 0) { roundDown = (outputDigit & 1) == 0; @@ -1607,31 +1582,22 @@ typedef union FloatUnion80 static npy_uint32 FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, - npy_bool hasUnequalMargins, npy_bool unique, - npy_int32 precision, TrimMode trim_mode, - npy_int32 digits_left, npy_int32 digits_right) + npy_bool hasUnequalMargins, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, + TrimMode trim_mode, npy_int32 digits_left, + npy_int32 digits_right) { npy_int32 printExponent; npy_int32 numDigits, numWholeDigits, has_sign=0; - npy_int32 maxPrintLen = bufferSize - 1, pos = 0; - CutoffMode cutoffMode; + npy_int32 maxPrintLen = (npy_int32)bufferSize - 1, pos = 0; /* track the # of digits past the decimal point that have been printed */ npy_int32 numFractionDigits = 0; DEBUG_ASSERT(bufferSize > 0); - if (unique) { - if (precision < 0) { - cutoffMode = CutoffMode_Unique; - } - else { - cutoffMode = CutoffMode_MaxFractionUnique; - } - } - else { - cutoffMode = CutoffMode_FractionLength; + if (digit_mode != DigitMode_Unique) { DEBUG_ASSERT(precision >= 0); } @@ -1645,7 +1611,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, - cutoffMode, precision, buffer + has_sign, + digit_mode, cutoff_mode, precision, buffer + has_sign, maxPrintLen - has_sign, &printExponent); DEBUG_ASSERT(numDigits > 0); @@ -1671,11 +1637,11 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } } /* insert the decimal point prior to the fraction */ - else if (numDigits > (npy_uint32)numWholeDigits) { - npy_uint32 maxFractionDigits; + else if (numDigits > 
numWholeDigits) { + npy_int32 maxFractionDigits; numFractionDigits = numDigits - numWholeDigits; - maxFractionDigits = maxPrintLen - numWholeDigits -1-pos; + maxFractionDigits = maxPrintLen - numWholeDigits - 1 - pos; if (numFractionDigits > maxFractionDigits) { numFractionDigits = maxFractionDigits; } @@ -1690,19 +1656,20 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } else { /* shift out the fraction to make room for the leading zeros */ - npy_uint32 numFractionZeros = 0; + npy_int32 numFractionZeros = 0; if (pos + 2 < maxPrintLen) { - npy_uint32 maxFractionZeros, digitsStartIdx, maxFractionDigits, i; + npy_int32 maxFractionZeros, digitsStartIdx, maxFractionDigits, i; maxFractionZeros = maxPrintLen - 2 - pos; - numFractionZeros = (npy_uint32)-printExponent - 1; + numFractionZeros = -(printExponent + 1); if (numFractionZeros > maxFractionZeros) { numFractionZeros = maxFractionZeros; } digitsStartIdx = 2 + numFractionZeros; - /* shift the significant digits right such that there is room for + /* + * shift the significant digits right such that there is room for * leading zeros */ numFractionDigits = numDigits; @@ -1752,11 +1719,11 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } } else if (trim_mode == TrimMode_None && - cutoffMode != CutoffMode_MaxFractionUnique && - precision > (npy_int32)numFractionDigits && pos < maxPrintLen) { + digit_mode != DigitMode_Unique && + precision > numFractionDigits && pos < maxPrintLen) { /* add trailing zeros up to precision length */ /* compute the number of trailing zeros needed */ - npy_uint32 count = precision - numFractionDigits; + npy_int32 count = precision - numFractionDigits; if (pos + count > maxPrintLen) { count = maxPrintLen - pos; } @@ -1785,7 +1752,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, /* add any whitespace padding to right side */ if (digits_right >= numFractionDigits) { - npy_uint32 count = digits_right - 
numFractionDigits; + npy_int32 count = digits_right - numFractionDigits; /* in trim_mode DptZeros, if right padding, add a space for the . */ if (trim_mode == TrimMode_DptZeros && numFractionDigits == 0 @@ -1803,8 +1770,8 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } /* add any whitespace padding to left side */ if (digits_left > numWholeDigits + has_sign) { - npy_uint32 shift = digits_left - (numWholeDigits + has_sign); - npy_uint32 count = pos; + npy_int32 shift = digits_left - (numWholeDigits + has_sign); + npy_int32 count = pos; if (count + shift > maxPrintLen){ count = maxPrintLen - shift; @@ -1815,7 +1782,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } pos = shift + count; for ( ; shift > 0; shift--) { - buffer[shift-1] = ' '; + buffer[shift - 1] = ' '; } } @@ -1848,7 +1815,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, static npy_uint32 FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, - npy_bool hasUnequalMargins, npy_bool unique, + npy_bool hasUnequalMargins, DigitMode digit_mode, npy_int32 precision, TrimMode trim_mode, npy_int32 digits_left, npy_int32 exp_digits) { @@ -1857,21 +1824,12 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, char *pCurOut; npy_int32 numFractionDigits; npy_int32 leftchars; - CutoffMode cutoffMode; - if (unique) { - if (precision < 0) { - cutoffMode = CutoffMode_Unique; - } - else { - cutoffMode = CutoffMode_MaxTotalUnique; - } - } - else { - cutoffMode = CutoffMode_TotalLength; + if (digit_mode != DigitMode_Unique) { DEBUG_ASSERT(precision >= 0); } + DEBUG_ASSERT(bufferSize > 0); pCurOut = buffer; @@ -1899,8 +1857,8 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, - cutoffMode, precision + 1, pCurOut, bufferSize, - 
&printExponent); + digit_mode, CutoffMode_TotalLength, precision + 1, + pCurOut, bufferSize, &printExponent); DEBUG_ASSERT(numDigits > 0); DEBUG_ASSERT(numDigits <= bufferSize); @@ -1914,7 +1872,8 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, /* insert the decimal point prior to the fractional number */ numFractionDigits = numDigits-1; if (numFractionDigits > 0 && bufferSize > 1) { - npy_uint32 maxFractionDigits = bufferSize-2; + npy_int32 maxFractionDigits = (npy_int32)bufferSize - 2; + if (numFractionDigits > maxFractionDigits) { numFractionDigits = maxFractionDigits; } @@ -1943,14 +1902,15 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, } } else if (trim_mode == TrimMode_None && - cutoffMode != CutoffMode_MaxTotalUnique) { + digit_mode != DigitMode_Unique) { /* add trailing zeros up to precision length */ if (precision > (npy_int32)numFractionDigits) { char *pEnd; /* compute the number of trailing zeros needed */ - npy_uint32 numZeros = (precision - numFractionDigits); - if (numZeros > bufferSize-1) { - numZeros = bufferSize-1; + npy_int32 numZeros = (precision - numFractionDigits); + + if (numZeros > (npy_int32)bufferSize - 1) { + numZeros = (npy_int32)bufferSize - 1; } for (pEnd = pCurOut + numZeros; pCurOut < pEnd; ++pCurOut) { @@ -1984,7 +1944,7 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, /* print the exponent into a local buffer and copy into output buffer */ if (bufferSize > 1) { char exponentBuffer[7]; - npy_uint32 digits[5]; + npy_int32 digits[5]; npy_int32 i, exp_size, count; if (exp_digits > 5) { @@ -2021,8 +1981,8 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, /* copy the exponent buffer into the output */ count = exp_size + 2; - if (count > bufferSize-1) { - count = bufferSize-1; + if (count > (npy_int32)bufferSize - 1) { + count = (npy_int32)bufferSize - 1; } memcpy(pCurOut, exponentBuffer, count); pCurOut += count; @@ 
-2148,7 +2108,8 @@ PrintInfNan(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, */ static npy_uint32 Dragon4_PrintFloat16(char *buffer, npy_uint32 bufferSize, npy_uint16 value, - npy_bool scientific, npy_bool unique, npy_int32 precision, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, npy_int32 digits_right, npy_int32 exp_digits) { @@ -2231,20 +2192,21 @@ Dragon4_PrintFloat16(char *buffer, npy_uint32 bufferSize, npy_uint16 value, /* format the value */ if (scientific) { return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, + mantissaBit, hasUnequalMargins, digit_mode, precision, trim_mode, digits_left, exp_digits); } else { return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, - precision, trim_mode, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, digits_left, digits_right); } } static npy_uint32 Dragon4_PrintFloat32(char *buffer, npy_uint32 bufferSize, npy_float32 value, - npy_bool scientific, npy_bool unique, npy_int32 precision, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, npy_int32 digits_right, npy_int32 exp_digits) { @@ -2327,20 +2289,21 @@ Dragon4_PrintFloat32(char *buffer, npy_uint32 bufferSize, npy_float32 value, /* format the value */ if (scientific) { return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, + mantissaBit, hasUnequalMargins, digit_mode, precision, trim_mode, digits_left, exp_digits); } else { return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, - precision, trim_mode, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, 
digits_left, digits_right); } } static npy_uint32 Dragon4_PrintFloat64(char *buffer, npy_uint32 bufferSize, npy_float64 value, - npy_bool scientific, npy_bool unique, npy_int32 precision, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, npy_int32 digits_right, npy_int32 exp_digits) { @@ -2424,20 +2387,21 @@ Dragon4_PrintFloat64(char *buffer, npy_uint32 bufferSize, npy_float64 value, /* format the value */ if (scientific) { return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, + mantissaBit, hasUnequalMargins, digit_mode, precision, trim_mode, digits_left, exp_digits); } else { return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, - precision, trim_mode, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, digits_left, digits_right); } } static npy_uint32 Dragon4_PrintFloat128(char *buffer, npy_uint32 bufferSize, FloatVal128 value, - npy_bool scientific, npy_bool unique, npy_int32 precision, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, npy_int32 digits_right, npy_int32 exp_digits) { @@ -2519,23 +2483,23 @@ Dragon4_PrintFloat128(char *buffer, npy_uint32 bufferSize, FloatVal128 value, /* format the value */ if (scientific) { return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, + mantissaBit, hasUnequalMargins, digit_mode, precision, trim_mode, digits_left, exp_digits); } else { return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, - mantissaBit, hasUnequalMargins, unique, - precision, trim_mode, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, digits_left, digits_right); } } PyObject * 
-Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, - int precision, int sign, TrimMode trim, - int pad_left, int pad_right) +Dragon4_Positional_AnySize(void *val, size_t size, DigitMode digit_mode, + CutoffMode cutoff_mode, int precision, int sign, + TrimMode trim, int pad_left, int pad_right) { - /* + /* * Use a very large buffer in case anyone tries to output a large numberG. * 16384 should be enough to uniquely print any float128, which goes up * to about 10^4932 */ @@ -2554,15 +2518,18 @@ Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, switch (size) { case 2: Dragon4_PrintFloat16(repr, sizeof(repr), *(npy_float16*)val, - 0, unique, precision, sign, trim, pad_left, pad_right, -1); + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); break; case 4: Dragon4_PrintFloat32(repr, sizeof(repr), *(npy_float32*)val, - 0, unique, precision, sign, trim, pad_left, pad_right, -1); + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); break; case 8: Dragon4_PrintFloat64(repr, sizeof(repr), *(npy_float64*)val, - 0, unique, precision, sign, trim, pad_left, pad_right, -1); + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); break; #ifdef NPY_FLOAT80 case 10: @@ -2570,7 +2537,8 @@ Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, val128.integer[0] = buf80.integer.a; val128.integer[1] = buf80.integer.b; Dragon4_PrintFloat128(repr, sizeof(repr), val128, - 0, unique, precision, sign, trim, pad_left, pad_right, -1); + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); break; #endif #ifdef NPY_FLOAT96 @@ -2579,7 +2547,8 @@ Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, val128.integer[0] = buf96.integer.a; val128.integer[1] = buf96.integer.b; Dragon4_PrintFloat128(repr, sizeof(repr), val128, - 0, unique, precision, sign, trim, pad_left, pad_right, -1); + 0, digit_mode, cutoff_mode, precision, + sign, 
trim, pad_left, pad_right, -1); break; #endif #ifdef NPY_FLOAT128 @@ -2588,7 +2557,8 @@ Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, val128.integer[0] = buf128.integer.a; val128.integer[1] = buf128.integer.b; Dragon4_PrintFloat128(repr, sizeof(repr), val128, - 0, unique, precision, sign, trim, pad_left, pad_right, -1); + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); break; #endif default: @@ -2600,42 +2570,48 @@ Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, } PyObject * -Dragon4_Positional(PyObject *obj, npy_bool unique, int precision, int sign, - TrimMode trim, int pad_left, int pad_right) +Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, + int precision, int sign, TrimMode trim, int pad_left, + int pad_right) { double val; if (PyArray_IsScalar(obj, Half)) { npy_half x = ((PyHalfScalarObject *)obj)->obval; - return Dragon4_Positional_AnySize(&x, sizeof(npy_half), unique, - precision, sign, trim, pad_left, pad_right); + return Dragon4_Positional_AnySize(&x, sizeof(npy_half), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); } else if (PyArray_IsScalar(obj, Float)) { npy_float x = ((PyFloatScalarObject *)obj)->obval; - return Dragon4_Positional_AnySize(&x, sizeof(npy_float), unique, - precision, sign, trim, pad_left, pad_right); + return Dragon4_Positional_AnySize(&x, sizeof(npy_float), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); } else if (PyArray_IsScalar(obj, Double)) { npy_double x = ((PyDoubleScalarObject *)obj)->obval; - return Dragon4_Positional_AnySize(&x, sizeof(npy_double), unique, - precision, sign, trim, pad_left, pad_right); + return Dragon4_Positional_AnySize(&x, sizeof(npy_double), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); } else if (PyArray_IsScalar(obj, LongDouble)) { npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval; - return 
Dragon4_Positional_AnySize(&x, sizeof(npy_longdouble), unique, - precision, sign, trim, pad_left, pad_right); + return Dragon4_Positional_AnySize(&x, sizeof(npy_longdouble), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); } val = PyFloat_AsDouble(obj); if (PyErr_Occurred()) { return NULL; } - return Dragon4_Positional_AnySize(&val, sizeof(double), unique, - precision, sign, trim, pad_left, pad_right); + return Dragon4_Positional_AnySize(&val, sizeof(double), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); } PyObject * -Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, +Dragon4_Scientific_AnySize(void *val, size_t size, DigitMode digit_mode, int precision, int sign, TrimMode trim, int pad_left, int exp_digits) { @@ -2652,19 +2628,24 @@ Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, FloatUnion128 buf128; #endif + /* dummy, is ignored in scientific mode */ + CutoffMode cutoff_mode = CutoffMode_TotalLength; switch (size) { case 2: Dragon4_PrintFloat16(repr, sizeof(repr), *(npy_float16*)val, - 1, unique, precision, sign, trim, pad_left, -1, exp_digits); + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); break; case 4: Dragon4_PrintFloat32(repr, sizeof(repr), *(npy_float32*)val, - 1, unique, precision, sign, trim, pad_left, -1, exp_digits); + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); break; case 8: Dragon4_PrintFloat64(repr, sizeof(repr), *(npy_float64*)val, - 1, unique, precision, sign, trim, pad_left, -1, exp_digits); + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); break; #ifdef NPY_FLOAT80 case 10: @@ -2672,7 +2653,8 @@ Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, val128.integer[0] = buf80.integer.a; val128.integer[1] = buf80.integer.b; Dragon4_PrintFloat128(repr, sizeof(repr), val128, - 1, unique, precision, sign, trim, pad_left, -1, exp_digits); + 1, 
digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); break; #endif #ifdef NPY_FLOAT96 @@ -2681,7 +2663,8 @@ Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, val128.integer[0] = buf96.integer.a; val128.integer[1] = buf96.integer.b; Dragon4_PrintFloat128(repr, sizeof(repr), val128, - 1, unique, precision, sign, trim, pad_left, -1, exp_digits); + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); break; #endif #ifdef NPY_FLOAT128 @@ -2690,7 +2673,8 @@ Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, val128.integer[0] = buf128.integer.a; val128.integer[1] = buf128.integer.b; Dragon4_PrintFloat128(repr, sizeof(repr), val128, - 1, unique, precision, sign, trim, pad_left, -1, exp_digits); + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); break; #endif default: @@ -2702,36 +2686,41 @@ Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, } PyObject * -Dragon4_Scientific(PyObject *obj, npy_bool unique, int precision, int sign, - TrimMode trim, int pad_left, int exp_digits) +Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, + int sign, TrimMode trim, int pad_left, int exp_digits) { double val; if (PyArray_IsScalar(obj, Half)) { npy_half x = ((PyHalfScalarObject *)obj)->obval; - return Dragon4_Scientific_AnySize(&x, sizeof(npy_half), unique, - precision, sign, trim, pad_left, exp_digits); + return Dragon4_Scientific_AnySize(&x, sizeof(npy_half), + digit_mode, precision, + sign, trim, pad_left, exp_digits); } else if (PyArray_IsScalar(obj, Float)) { npy_float x = ((PyFloatScalarObject *)obj)->obval; - return Dragon4_Scientific_AnySize(&x, sizeof(npy_float), unique, - precision, sign, trim, pad_left, exp_digits); + return Dragon4_Scientific_AnySize(&x, sizeof(npy_float), + digit_mode, precision, + sign, trim, pad_left, exp_digits); } else if (PyArray_IsScalar(obj, Double)) { npy_double x = ((PyDoubleScalarObject 
*)obj)->obval; - return Dragon4_Scientific_AnySize(&x, sizeof(npy_double), unique, - precision, sign, trim, pad_left, exp_digits); + return Dragon4_Scientific_AnySize(&x, sizeof(npy_double), + digit_mode, precision, + sign, trim, pad_left, exp_digits); } else if (PyArray_IsScalar(obj, LongDouble)) { npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval; - return Dragon4_Scientific_AnySize(&x, sizeof(npy_longdouble), unique, - precision, sign, trim, pad_left, exp_digits); + return Dragon4_Scientific_AnySize(&x, sizeof(npy_longdouble), + digit_mode, precision, + sign, trim, pad_left, exp_digits); } val = PyFloat_AsDouble(obj); if (PyErr_Occurred()) { return NULL; } - return Dragon4_Scientific_AnySize(&val, sizeof(double), unique, - precision, sign, trim, pad_left, exp_digits); + return Dragon4_Scientific_AnySize(&val, sizeof(double), + digit_mode, precision, + sign, trim, pad_left, exp_digits); } diff --git a/numpy/core/src/multiarray/dragon4.h b/numpy/core/src/multiarray/dragon4.h index 814c84a2f..5559c5157 100644 --- a/numpy/core/src/multiarray/dragon4.h +++ b/numpy/core/src/multiarray/dragon4.h @@ -40,6 +40,22 @@ #include "npy_pycompat.h" #include "numpy/arrayscalars.h" +typedef enum DigitMode +{ + /* Round digits to print shortest uniquely identifiable number. 
*/ + DigitMode_Unique, + /* Output the digits of the number as if with infinite precision */ + DigitMode_Exact, +} DigitMode; + +typedef enum CutoffMode +{ + /* up to cutoffNumber significant digits */ + CutoffMode_TotalLength, + /* up to cutoffNumber significant digits past the decimal point */ + CutoffMode_FractionLength, +} CutoffMode; + typedef enum TrimMode { TrimMode_None, /* don't trim zeros, always leave a decimal point */ @@ -49,22 +65,23 @@ typedef enum TrimMode } TrimMode; PyObject * -Dragon4_Positional_AnySize(void *val, size_t size, npy_bool unique, - int precision, int sign, TrimMode trim, - int pad_left, int pad_right); +Dragon4_Positional_AnySize(void *val, size_t size, DigitMode digit_mode, + CutoffMode cutoff_mode, int precision, int sign, + TrimMode trim, int pad_left, int pad_right); PyObject * -Dragon4_Scientific_AnySize(void *val, size_t size, npy_bool unique, +Dragon4_Scientific_AnySize(void *val, size_t size, DigitMode digit_mode, int precision, int sign, TrimMode trim, - int pad_left, int exp_digits); + int pad_left, int pad_right); PyObject * -Dragon4_Positional(PyObject *obj, npy_bool unique, int precision, int sign, - TrimMode trim, int pad_left, int pad_right); +Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, + int precision, int sign, TrimMode trim, int pad_left, + int pad_right); PyObject * -Dragon4_Scientific(PyObject *obj, npy_bool unique, int precision, int sign, - TrimMode trim, int pad_left, int exp_digits); +Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, + int sign, TrimMode trim, int pad_left, int exp_digits); #endif diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index 943b8aecf..7db606194 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -1905,7 +1905,7 @@ parse_operand_subscripts(char *subscripts, int length, /* * Find any labels duplicated for this operand, and turn them - * 
into negative offets to the axis to merge with. + * into negative offsets to the axis to merge with. * * In C, the char type may be signed or unsigned, but with * twos complement arithmetic the char is ok either way here, and diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c index 7f56ddb03..d3dcc934f 100644 --- a/numpy/core/src/multiarray/flagsobject.c +++ b/numpy/core/src/multiarray/flagsobject.c @@ -88,7 +88,7 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) /* * Check whether the given array is stored contiguously - * in memory. And update the passed in ap flags apropriately. + * in memory. And update the passed in ap flags appropriately. * * The traditional rule is that for an array to be flagged as C contiguous, * the following must hold: @@ -208,11 +208,10 @@ arrayflags_dealloc(PyArrayFlagsObject *self) _define_get(NPY_ARRAY_C_CONTIGUOUS, contiguous) _define_get(NPY_ARRAY_F_CONTIGUOUS, fortran) -_define_get(NPY_ARRAY_UPDATEIFCOPY, updateifcopy) +_define_get(NPY_ARRAY_WRITEBACKIFCOPY, writebackifcopy) _define_get(NPY_ARRAY_OWNDATA, owndata) _define_get(NPY_ARRAY_ALIGNED, aligned) _define_get(NPY_ARRAY_WRITEABLE, writeable) - _define_get(NPY_ARRAY_ALIGNED| NPY_ARRAY_WRITEABLE, behaved) _define_get(NPY_ARRAY_ALIGNED| @@ -220,6 +219,25 @@ _define_get(NPY_ARRAY_ALIGNED| NPY_ARRAY_C_CONTIGUOUS, carray) static PyObject * +arrayflags_updateifcopy_get(PyArrayFlagsObject *self) +{ + PyObject *item; + /* 2017-Nov-10 1.14 */ + if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) { + return NULL; + } + if ((self->flags & (NPY_ARRAY_UPDATEIFCOPY)) == (NPY_ARRAY_UPDATEIFCOPY)) { + item = Py_True; + } + else { + item = Py_False; + } + Py_INCREF(item); + return item; +} + + +static PyObject * arrayflags_forc_get(PyArrayFlagsObject *self) { PyObject *item; @@ -291,6 +309,35 @@ arrayflags_updateifcopy_set(PyArrayFlagsObject *self, PyObject *obj) "Cannot set flags on array scalars."); return -1; } + /* 
2017-Nov-10 1.14 */ + if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) { + return -1; + } + res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, + (PyObject_IsTrue(obj) ? Py_True : Py_False)); + if (res == NULL) { + return -1; + } + Py_DECREF(res); + return 0; +} + +/* relies on setflags order being write, align, uic */ +static int +arrayflags_writebackifcopy_set(PyArrayFlagsObject *self, PyObject *obj) +{ + PyObject *res; + + if (obj == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete flags writebackifcopy attribute"); + return -1; + } + if (self->arr == NULL) { + PyErr_SetString(PyExc_ValueError, + "Cannot set flags on array scalars."); + return -1; + } res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False)); if (res == NULL) { @@ -372,6 +419,10 @@ static PyGetSetDef arrayflags_getsets[] = { (getter)arrayflags_updateifcopy_get, (setter)arrayflags_updateifcopy_set, NULL, NULL}, + {"writebackifcopy", + (getter)arrayflags_writebackifcopy_get, + (setter)arrayflags_writebackifcopy_set, + NULL, NULL}, {"owndata", (getter)arrayflags_owndata_get, NULL, @@ -455,6 +506,8 @@ arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind) return arrayflags_owndata_get(self); case 'A': return arrayflags_aligned_get(self); + case 'X': + return arrayflags_writebackifcopy_get(self); case 'U': return arrayflags_updateifcopy_get(self); default: @@ -522,6 +575,11 @@ arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind) return arrayflags_fortran_get(self); } break; + case 14: + if (strncmp(key, "WRITEBACKIFCOPY", n) == 0) { + return arrayflags_writebackifcopy_get(self); + } + break; } fail: @@ -564,6 +622,10 @@ arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item) ((n==1) && (strncmp(key, "U", n) == 0))) { return arrayflags_updateifcopy_set(self, item); } + else if (((n==14) && (strncmp(key, "WRITEBACKIFCOPY", n) == 0)) || + ((n==1) && 
(strncmp(key, "X", n) == 0))) { + return arrayflags_writebackifcopy_set(self, item); + } fail: PyErr_SetString(PyExc_KeyError, "Unknown flag"); @@ -589,16 +651,17 @@ arrayflags_print(PyArrayFlagsObject *self) return PyUString_FromFormat( " %s : %s\n %s : %s\n" " %s : %s\n %s : %s\n" - " %s : %s\n %s : %s", - "C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS), - "F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS), - "OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA), - "WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE), - "ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED), - "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY)); + " %s : %s\n %s : %s\n" + " %s : %s", + "C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS), + "F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS), + "OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA), + "WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE), + "ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED), + "WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY), + "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY)); } - static int arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) { diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index a43675040..825363f19 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -365,9 +365,11 @@ array_data_set(PyArrayObject *self, PyObject *op) PyDataMem_FREE(PyArray_DATA(self)); } if (PyArray_BASE(self)) { - if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) { + if ((PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) || + (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY)) { PyArray_ENABLEFLAGS((PyArrayObject *)PyArray_BASE(self), NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); } Py_DECREF(PyArray_BASE(self)); @@ -614,7 +616,7 @@ array_struct_get(PyArrayObject *self) inter->itemsize = PyArray_DESCR(self)->elsize; inter->flags = PyArray_FLAGS(self); /* reset unused flags */ - inter->flags &= 
~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_OWNDATA); + inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_UPDATEIFCOPY |NPY_ARRAY_OWNDATA); if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NPY_ARRAY_NOTSWAPPED; /* * Copy shape and strides over since these can be reset diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 3b5d76362..eb9ef5915 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -14,6 +14,7 @@ #include "npy_pycompat.h" +#include "multiarraymodule.h" #include "common.h" #include "arrayobject.h" #include "ctors.h" @@ -87,8 +88,7 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, } else { - int flags = NPY_ARRAY_CARRAY | - NPY_ARRAY_UPDATEIFCOPY; + int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; if ((PyArray_NDIM(out) != nd) || !PyArray_CompareLists(PyArray_DIMS(out), shape, nd)) { @@ -235,13 +235,15 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, Py_XDECREF(self); if (out != NULL && out != obj) { Py_INCREF(out); + PyArray_ResolveWritebackIfCopy(obj); Py_DECREF(obj); obj = out; } return (PyObject *)obj; fail: - PyArray_XDECREF_ERR(obj); + PyArray_DiscardWritebackIfCopy(obj); + Py_XDECREF(obj); Py_XDECREF(indices); Py_XDECREF(self); return NULL; @@ -273,7 +275,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; - int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY; + int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; if (clipmode == NPY_RAISE) { flags |= NPY_ARRAY_ENSURECOPY; @@ -407,6 +409,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, Py_XDECREF(values); Py_XDECREF(indices); if (copied) { + PyArray_ResolveWritebackIfCopy(self); Py_DECREF(self); } Py_RETURN_NONE; @@ -415,7 +418,8 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, 
Py_XDECREF(indices); Py_XDECREF(values); if (copied) { - PyArray_XDECREF_ERR(self); + PyArray_DiscardWritebackIfCopy(self); + Py_XDECREF(self); } return NULL; } @@ -448,7 +452,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) dtype = PyArray_DESCR(self); Py_INCREF(dtype); obj = (PyArrayObject *)PyArray_FromArray(self, dtype, - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (obj != self) { copied = 1; } @@ -524,6 +528,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) Py_XDECREF(values); Py_XDECREF(mask); if (copied) { + PyArray_ResolveWritebackIfCopy(self); Py_DECREF(self); } Py_RETURN_NONE; @@ -532,7 +537,8 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) Py_XDECREF(mask); Py_XDECREF(values); if (copied) { - PyArray_XDECREF_ERR(self); + PyArray_DiscardWritebackIfCopy(self); + Py_XDECREF(self); } return NULL; } @@ -694,7 +700,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } else { int flags = NPY_ARRAY_CARRAY | - NPY_ARRAY_UPDATEIFCOPY | + NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST; if ((PyArray_NDIM(out) != multi->nd) @@ -769,6 +775,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, npy_free_cache(mps, n * sizeof(mps[0])); if (out != NULL && out != obj) { Py_INCREF(out); + PyArray_ResolveWritebackIfCopy(obj); Py_DECREF(obj); obj = out; } @@ -781,7 +788,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } Py_XDECREF(ap); npy_free_cache(mps, n * sizeof(mps[0])); - PyArray_XDECREF_ERR(obj); + PyArray_DiscardWritebackIfCopy(obj); + Py_XDECREF(obj); return NULL; } @@ -1811,26 +1819,17 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) } /* Handle negative axes with standard Python indexing rules */ - if (axis1 < 0) { - axis1 += ndim; + if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str_axis1) < 0) { + return NULL; } - if (axis2 < 0) { - 
axis2 += ndim; + if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str_axis2) < 0) { + return NULL; } - - /* Error check the two axes */ if (axis1 == axis2) { PyErr_SetString(PyExc_ValueError, "axis1 and axis2 cannot be the same"); return NULL; } - else if (axis1 < 0 || axis1 >= ndim || axis2 < 0 || axis2 >= ndim) { - PyErr_Format(PyExc_ValueError, - "axis1(=%d) and axis2(=%d) " - "must be within range (ndim=%d)", - axis1, axis2, ndim); - return NULL; - } /* Get the shape and strides of the two axes */ shape = PyArray_SHAPE(self); diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 9e6ed712c..723c565f0 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -243,7 +243,9 @@ array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao) it->ao = ao; it->size = PyArray_SIZE(ao); it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; + if (nd != 0) { + it->factors[nd-1] = 1; + } for (i = 0; i < nd; i++) { it->dims_m1[i] = PyArray_DIMS(ao)[i] - 1; it->strides[i] = PyArray_STRIDES(ao)[i]; @@ -340,7 +342,9 @@ PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) it->ao = ao; it->size = PyArray_MultiplyList(dims, nd); it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; + if (nd != 0) { + it->factors[nd-1] = 1; + } for (i = 0; i < nd; i++) { it->dims_m1[i] = dims[i] - 1; k = i - diff; @@ -1149,6 +1153,7 @@ iter_richcompare(PyArrayIterObject *self, PyObject *other, int cmp_op) return NULL; } ret = array_richcompare(new, other, cmp_op); + PyArray_ResolveWritebackIfCopy(new); Py_DECREF(new); return ret; } @@ -1321,7 +1326,9 @@ PyArray_Broadcast(PyArrayMultiIterObject *mit) it->nd_m1 = mit->nd - 1; it->size = tmp; nd = PyArray_NDIM(it->ao); - it->factors[mit->nd-1] = 1; + if (nd != 0) { + it->factors[mit->nd-1] = 1; + } for (j = 0; j < mit->nd; j++) { it->dims_m1[j] = mit->dimensions[j] - 1; k = j + nd - mit->nd; @@ -1715,7 +1722,7 @@ static PyMethodDef arraymultiter_methods[] = { {"reset", 
(PyCFunction) arraymultiter_reset, METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL}, /* sentinal */ + {NULL, NULL, 0, NULL}, /* sentinel */ }; NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = { diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 1a92365c8..eca4e98be 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -206,7 +206,7 @@ unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n) * to. The references written are new. * @param result_n The length of the result buffer * - * @returns The number of items in `result`, or -1 if an error occured. + * @returns The number of items in `result`, or -1 if an error occurred. * The entries in `result` at and beyond this index should be * assumed to contain garbage, even if they were initialized * to NULL, so are not safe to Py_XDECREF. Use multi_DECREF to @@ -1396,11 +1396,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) *view = NULL; /* first check for a single field name */ -#if defined(NPY_PY3K) - if (PyUnicode_Check(ind)) { -#else - if (PyString_Check(ind) || PyUnicode_Check(ind)) { -#endif + if (PyBaseString_Check(ind)) { PyObject *tup; PyArray_Descr *fieldtype; npy_intp offset; @@ -1477,11 +1473,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) return -1; } -#if defined(NPY_PY3K) - if (!PyUnicode_Check(name)) { -#else - if (!PyString_Check(name) && !PyUnicode_Check(name)) { -#endif + if (!PyBaseString_Check(name)) { Py_DECREF(name); Py_DECREF(fields); Py_DECREF(names); @@ -1521,7 +1513,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) PyObject *errmsg = PyUString_FromString( "duplicate field of name "); PyUString_ConcatAndDel(&errmsg, name); - PyErr_SetObject(PyExc_KeyError, errmsg); + PyErr_SetObject(PyExc_ValueError, errmsg); Py_DECREF(errmsg); Py_DECREF(fields); Py_DECREF(names); @@ -3269,7 +3261,7 @@ PyArray_MapIterNew(npy_index_info 
*indices , int index_num, int index_type, * If copy_if_overlap != 0, check if `a` has memory overlap with any of the * arrays in `index` and with `extra_op`. If yes, make copies as appropriate * to avoid problems if `a` is modified during the iteration. - * `iter->array` may contain a copied array (with UPDATEIFCOPY set). + * `iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set). */ NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index, @@ -3303,7 +3295,7 @@ PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index, } Py_INCREF(a); - if (PyArray_SetUpdateIfCopyBase(a_copy, a) < 0) { + if (PyArray_SetWritebackIfCopyBase(a_copy, a) < 0) { goto fail; } diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 6a121574b..cd88ab76b 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -329,16 +329,7 @@ array_min(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * array_ptp(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis = NPY_MAXDIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&:ptp", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return PyArray_Ptp(self, axis, out); + NPY_FORWARD_NDARRAY_METHOD("_ptp"); } @@ -1782,6 +1773,7 @@ array_setstate(PyArrayObject *self, PyObject *args) Py_XDECREF(PyArray_BASE(self)); fa->base = NULL; + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); if (PyArray_DIMS(self) != NULL) { @@ -2323,11 +2315,12 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) if (PyObject_IsTrue(uic)) { fa->flags = flagback; PyErr_SetString(PyExc_ValueError, - "cannot set UPDATEIFCOPY " \ + "cannot set WRITEBACKIFCOPY " \ "flag to True"); return NULL; } else { + 
PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); Py_XDECREF(fa->base); fa->base = NULL; @@ -2450,7 +2443,7 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args)) static PyObject * array_getslice(PyArrayObject *self, PyObject *args) { - PyObject *start, *stop, *slice; + PyObject *start, *stop, *slice, *result; if (!PyArg_ParseTuple(args, "OO:__getslice__", &start, &stop)) { return NULL; } @@ -2461,7 +2454,9 @@ array_getslice(PyArrayObject *self, PyObject *args) } /* Deliberately delegate to subclasses */ - return PyObject_GetItem((PyObject *)self, slice); + result = PyObject_GetItem((PyObject *)self, slice); + Py_DECREF(slice); + return result; } static PyObject * @@ -2479,9 +2474,10 @@ array_setslice(PyArrayObject *self, PyObject *args) /* Deliberately delegate to subclasses */ if (PyObject_SetItem((PyObject *)self, slice, value) < 0) { + Py_DECREF(slice); return NULL; } - + Py_DECREF(slice); Py_RETURN_NONE; } @@ -2503,6 +2499,12 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction)array_ufunc, METH_VARARGS | METH_KEYWORDS, NULL}, +#ifndef NPY_PY3K + {"__unicode__", + (PyCFunction)array_unicode, + METH_NOARGS, NULL}, +#endif + /* for the sys module */ {"__sizeof__", (PyCFunction) array_sizeof, diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src index a20cf6257..d63349560 100644 --- a/numpy/core/src/multiarray/multiarray_tests.c.src +++ b/numpy/core/src/multiarray/multiarray_tests.c.src @@ -8,6 +8,8 @@ #include "npy_extint128.h" #include "common.h" +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) + /* test PyArray_IsPythonScalar, before including private py3 compat header */ static PyObject * IsPythonScalar(PyObject * dummy, PyObject *args) @@ -619,6 +621,54 @@ npy_char_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) return (PyObject *)descr; } +/* used to test UPDATEIFCOPY usage emits deprecation warning */ 
+static PyObject* +npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args) +{ + int flags; + PyObject* array; + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY; + array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); + if (array == NULL) + return NULL; + PyArray_ResolveWritebackIfCopy((PyArrayObject*)array); + Py_DECREF(array); + Py_RETURN_NONE; +} + +/* used to create array with WRITEBACKIFCOPY flag */ +static PyObject* +npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) +{ + int flags; + PyObject* array; + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; + array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); + if (array == NULL) + return NULL; + return array; +} + +/* resolve WRITEBACKIFCOPY */ +static PyObject* +npy_resolve(PyObject* NPY_UNUSED(self), PyObject* args) +{ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + PyArray_ResolveWritebackIfCopy((PyArrayObject*)args); + Py_RETURN_NONE; +} + #if !defined(NPY_PY3K) static PyObject * int_subclass(PyObject *dummy, PyObject *args) @@ -988,7 +1038,7 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject return NULL; } - if (PyTuple_GET_SIZE(A) > sizeof(terms) / sizeof(diophantine_term_t)) { + if (PyTuple_GET_SIZE(A) > (Py_ssize_t)ARRAY_SIZE(terms)) { PyErr_SetString(PyExc_ValueError, "too many terms in equation"); goto fail; } @@ -1708,6 +1758,15 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"npy_char_deprecation", npy_char_deprecation, METH_NOARGS, NULL}, + {"npy_updateifcopy_deprecation", + npy_updateifcopy_deprecation, + METH_O, NULL}, + {"npy_create_writebackifcopy", + npy_create_writebackifcopy, + METH_O, NULL}, + {"npy_resolve", 
+ npy_resolve, + METH_O, NULL}, #if !defined(NPY_PY3K) {"test_int_subclass", int_subclass, diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 9ee53362e..0008cb04b 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -62,9 +62,28 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "compiled_base.h" #include "mem_overlap.h" #include "alloc.h" +#include "typeinfo.h" #include "get_attr_string.h" +/* + * global variable to determine if legacy printing is enabled, accessible from + * C. For simplicity the mode is encoded as an integer where '0' means no + * legacy mode, and '113' means 1.13 legacy mode. We can upgrade this if we + * have more complex requirements in the future. + */ +int npy_legacy_print_mode = 0; + +static PyObject * +set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (!PyArg_ParseTuple(args, "i", &npy_legacy_print_mode)) { + return NULL; + } + Py_RETURN_NONE; +} + + /* Only here for API compatibility */ NPY_NO_EXPORT PyTypeObject PyBigArray_Type; @@ -218,7 +237,8 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, n = PyArray_DIMS(ap)[0]; ptr2 = (char **)PyArray_malloc(n * sizeof(char *)); if (!ptr2) { - goto fail; + PyErr_NoMemory(); + return -1; } for (i = 0; i < n; i++) { ptr2[i] = PyArray_BYTES(ap) + i*PyArray_STRIDES(ap)[0]; @@ -230,7 +250,8 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, m = PyArray_DIMS(ap)[1]; ptr3 = (char ***)PyArray_malloc(n*(m+1) * sizeof(char *)); if (!ptr3) { - goto fail; + PyErr_NoMemory(); + return -1; } for (i = 0; i < n; i++) { ptr3[i] = (char **) &ptr3[n + m * i]; @@ -243,10 +264,6 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, memcpy(dims, PyArray_DIMS(ap), nd*sizeof(npy_intp)); *op = (PyObject *)ap; return 0; - -fail: - PyErr_SetString(PyExc_MemoryError, "no memory"); - return -1; } /* Deprecated --- Use 
PyArray_AsCArray instead */ @@ -830,7 +847,7 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, /* set copy-back */ Py_INCREF(out); - if (PyArray_SetUpdateIfCopyBase(out_buf, out) < 0) { + if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { Py_DECREF(out); Py_DECREF(out_buf); return NULL; @@ -1102,6 +1119,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) Py_DECREF(ap2); /* Trigger possible copy-back into `result` */ + PyArray_ResolveWritebackIfCopy(out_buf); Py_DECREF(out_buf); return (PyObject *)result; @@ -1309,6 +1327,7 @@ _pyarray_revert(PyArrayObject *ret) else { char *tmp = PyArray_malloc(PyArray_DESCR(ret)->elsize); if (tmp == NULL) { + PyErr_NoMemory(); return -1; } sw2 = op + (length - 1) * os; @@ -3587,27 +3606,9 @@ as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) /* * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode. - * Arguments: - * x - a numpy scalar of Floating type - * precision - number of fractional digits to show. In unique mode, can be - * ommited and the unique repr will be returned, otherwise the - * unique value will be truncated to this number of digits - * (breaking the uniqueness guarantee). In fixed mode, is - * required, and specifies the number of fractional digits to - * print. - * unique - whether to use unique (default) or fixed mode. - * sign - whether to show the sign for positive values. Default False - * trim - one of 'k', '.', '0', '-' to control trailing digits, as follows: - * k : don't trim zeros, always leave a decimal point - * . : trim all but the zero before the decimal point - * 0 : trim all trailing zeros, leave decimal point - * - : trim trailing zeros and a trailing decimal point - * Default is k. - * pad_left - pads left side of string with whitespace until at least - * this many characters are to the left of the decimal point. If - * -1, don't add any padding. Default -1. 
- * exp_digits - exponent will contain at least this many digits, padding - * with 0 if necessary. -1 means pad to 2. Maximum of 5. + * See docstring of `np.format_float_scientific` for description of arguments. + * The differences is that a value of -1 is valid for pad_left, exp_digits, + * precision, which is equivalent to `None`. */ static PyObject * dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) @@ -3617,11 +3618,12 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) "pad_left", "exp_digits", NULL}; int precision=-1, pad_left=-1, exp_digits=-1; char *trimstr=NULL; + DigitMode digit_mode; TrimMode trim = TrimMode_None; int sign=0, unique=1; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiisii", kwlist, - &obj, &precision, &unique, &sign, &trimstr, &pad_left, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiisii:dragon4_scientific", + kwlist, &obj, &precision, &unique, &sign, &trimstr, &pad_left, &exp_digits)) { return NULL; } @@ -3646,55 +3648,40 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } } + digit_mode = unique ? DigitMode_Unique : DigitMode_Exact; + if (unique == 0 && precision < 0) { PyErr_SetString(PyExc_TypeError, "in non-unique mode `precision` must be supplied"); return NULL; } - return Dragon4_Scientific(obj, unique, precision, sign, - trim, pad_left, exp_digits); + return Dragon4_Scientific(obj, digit_mode, precision, sign, trim, + pad_left, exp_digits); } /* * Prints floating-point scalars usign the Dragon4 algorithm, positional mode. - * Arguments: - * x - a numpy scalar of Floating type - * precision - number of fractional digits to show. In unique mode, can be - * ommited and the unique repr will be returned, otherwise the - * unique value will be truncated to this number of digits - * (breaking the uniqueness guarantee). In fixed mode, is - * required, and specifies the number of fractional digits to - * print. 
- * unique - whether to use unique (default) or fixed mode. - * sign - whether to show the sign for positive values. Default False - * trim - one of 'k', '.', '0', '-' to control trailing digits, as follows: - * k : don't trim zeros, always leave a decimal point - * . : trim all but the zero before the decimal point - * 0 : trim all trailing zeros, leave decimal point - * - : trim trailing zeros and a trailing decimal point - * Default is k. - * pad_left - pads left side of string with whitespace until at least - * this many characters are to the left of the decimal point. If - * -1, don't add any padding. Default -1. - * pad_right - pads right side of string with whitespace until at least - * this many characters are to the right of the decimal point. If - * -1, don't add any padding. Default -1. + * See docstring of `np.format_float_positional` for description of arguments. + * The differences is that a value of -1 is valid for pad_left, pad_right, + * precision, which is equivalent to `None`. 
*/ static PyObject * dragon4_positional(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *obj; - static char *kwlist[] = {"x", "precision", "unique", "sign", "trim", - "pad_left", "pad_right", NULL}; + static char *kwlist[] = {"x", "precision", "unique", "fractional", + "sign", "trim", "pad_left", "pad_right", NULL}; int precision=-1, pad_left=-1, pad_right=-1; char *trimstr=NULL; + CutoffMode cutoff_mode; + DigitMode digit_mode; TrimMode trim = TrimMode_None; - int sign=0, unique=1; + int sign=0, unique=1, fractional=0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiisii", kwlist, - &obj, &precision, &unique, &sign, &trimstr, &pad_left, - &pad_right)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiiisii:dragon4_positional", + kwlist, &obj, &precision, &unique, &fractional, &sign, &trimstr, + &pad_left, &pad_right)) { return NULL; } @@ -3718,13 +3705,17 @@ dragon4_positional(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } } + digit_mode = unique ? DigitMode_Unique : DigitMode_Exact; + cutoff_mode = fractional ? 
CutoffMode_FractionLength : + CutoffMode_TotalLength; + if (unique == 0 && precision < 0) { PyErr_SetString(PyExc_TypeError, "in non-unique mode `precision` must be supplied"); return NULL; } - return Dragon4_Positional(obj, unique, precision, sign, + return Dragon4_Positional(obj, digit_mode, cutoff_mode, precision, sign, trim, pad_left, pad_right); } @@ -3744,8 +3735,8 @@ format_longfloat(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) "not a longfloat"); return NULL; } - return Dragon4_Scientific(obj, precision, 0, 1, TrimMode_LeaveOneZero, - -1, -1); + return Dragon4_Scientific(obj, DigitMode_Unique, precision, 0, + TrimMode_LeaveOneZero, -1, -1); } static PyObject * @@ -4467,6 +4458,8 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"normalize_axis_index", (PyCFunction)normalize_axis_index, METH_VARARGS | METH_KEYWORDS, NULL}, + {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, + METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4673,6 +4666,7 @@ set_flaginfo(PyObject *d) _addnew(CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS, C); _addnew(ALIGNED, NPY_ARRAY_ALIGNED, A); _addnew(UPDATEIFCOPY, NPY_ARRAY_UPDATEIFCOPY, U); + _addnew(WRITEBACKIFCOPY, NPY_ARRAY_WRITEBACKIFCOPY, X); _addnew(WRITEABLE, NPY_ARRAY_WRITEABLE, W); _addone(C_CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS); _addone(F_CONTIGUOUS, NPY_ARRAY_F_CONTIGUOUS); @@ -4695,6 +4689,8 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ndmin = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL; static int intern_strings(void) @@ -4709,12 +4705,14 @@ intern_strings(void) npy_ma_str_copy = PyUString_InternFromString("copy"); npy_ma_str_dtype = PyUString_InternFromString("dtype"); npy_ma_str_ndmin = 
PyUString_InternFromString("ndmin"); + npy_ma_str_axis1 = PyUString_InternFromString("axis1"); + npy_ma_str_axis2 = PyUString_InternFromString("axis2"); return npy_ma_str_array && npy_ma_str_array_prepare && npy_ma_str_array_wrap && npy_ma_str_array_finalize && npy_ma_str_buffer && npy_ma_str_ufunc && npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype && - npy_ma_str_ndmin; + npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2; } @@ -4885,6 +4883,13 @@ PyMODINIT_FUNC initmultiarray(void) { (PyObject *)&NpyBusDayCalendar_Type); set_flaginfo(d); + /* Create the typeinfo types */ + typeinfo_init_structsequences(); + PyDict_SetItemString(d, + "typeinfo", (PyObject *)&PyArray_typeinfoType); + PyDict_SetItemString(d, + "typeinforanged", (PyObject *)&PyArray_typeinforangedType); + if (!intern_strings()) { goto err; } diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h index 82ae24845..3de68c549 100644 --- a/numpy/core/src/multiarray/multiarraymodule.h +++ b/numpy/core/src/multiarray/multiarraymodule.h @@ -11,5 +11,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ndmin; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2; #endif diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 1af396821..0d318178f 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -694,7 +694,7 @@ npyiter_convert_ops(PyObject *op_in, PyObject *op_flags_in, int fromanyflags = 0; if (op_flags[iop]&(NPY_ITER_READWRITE|NPY_ITER_WRITEONLY)) { - fromanyflags |= NPY_ARRAY_UPDATEIFCOPY; + fromanyflags |= NPY_ARRAY_WRITEBACKIFCOPY; } ao = (PyArrayObject *)PyArray_FROM_OF((PyObject *)op[iop], 
fromanyflags); diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index dbf71230a..915d743c8 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -16,6 +16,15 @@ #include "binop_override.h" +/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */ +#if (PY_VERSION_HEX < 0x02070B00) || \ + ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400)) + #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x)) +#else + #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) +#endif + + /************************************************************************* **************** Implement Number Protocol **************************** *************************************************************************/ @@ -785,7 +794,7 @@ _array_nonzero(PyArrayObject *mp) n = PyArray_SIZE(mp); if (n == 1) { int res; - if (Py_EnterRecursiveCall(" while converting array to bool")) { + if (_Py_EnterRecursiveCall(" while converting array to bool")) { return -1; } res = PyArray_DESCR(mp)->f->nonzero(PyArray_DATA(mp), mp); @@ -814,213 +823,112 @@ _array_nonzero(PyArrayObject *mp) } } - +/* + * Convert the array to a scalar if allowed, and apply the builtin function + * to it. The where argument is passed onto Py_EnterRecursiveCall when the + * array contains python objects. 
+ */ NPY_NO_EXPORT PyObject * -array_int(PyArrayObject *v) +array_scalar_forward(PyArrayObject *v, + PyObject *(*builtin_func)(PyObject *), + const char *where) { - PyObject *pv, *pv2; + PyObject *scalar; if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be"\ + PyErr_SetString(PyExc_TypeError, "only size-1 arrays can be"\ " converted to Python scalars"); return NULL; } - pv = PyArray_GETITEM(v, PyArray_DATA(v)); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); + + scalar = PyArray_GETITEM(v, PyArray_DATA(v)); + if (scalar == NULL) { return NULL; } - if (Py_TYPE(pv)->tp_as_number->nb_int == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to int"); - Py_DECREF(pv); - return NULL; + + /* Need to guard against recursion if our array holds references */ + if (PyDataType_REFCHK(PyArray_DESCR(v))) { + PyObject *res; + if (_Py_EnterRecursiveCall(where) != 0) { + Py_DECREF(scalar); + return NULL; + } + res = builtin_func(scalar); + Py_DECREF(scalar); + Py_LeaveRecursiveCall(); + return res; } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; + else { + PyObject *res; + res = builtin_func(scalar); + Py_DECREF(scalar); + return res; } - - pv2 = Py_TYPE(pv)->tp_as_number->nb_int(pv); - Py_DECREF(pv); - return pv2; } -static PyObject * + +NPY_NO_EXPORT PyObject * array_float(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_GETITEM(v, PyArray_DATA(v)); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to a "\ - "float; scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_float == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to float"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_float(pv); - Py_DECREF(pv); - return pv2; + return array_scalar_forward(v, &PyNumber_Float, " in ndarray.__float__"); } -#if !defined(NPY_PY3K) +#if defined(NPY_PY3K) -static PyObject * +NPY_NO_EXPORT PyObject * +array_int(PyArrayObject *v) +{ + return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__int__"); +} + +#else + +NPY_NO_EXPORT PyObject * +array_int(PyArrayObject *v) +{ + return array_scalar_forward(v, &PyNumber_Int, " in ndarray.__int__"); +} + +NPY_NO_EXPORT PyObject * array_long(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_GETITEM(v, PyArray_DATA(v)); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_long == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to long"); - Py_DECREF(pv); + return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__long__"); +} + +/* hex and oct aren't exposed to the C api, but we need a function pointer */ +static PyObject * +_PyNumber_Oct(PyObject *o) { + PyObject *res; + PyObject *mod = PyImport_ImportModule("__builtin__"); + if (mod == NULL) { return NULL; } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); + res = PyObject_CallMethod(mod, "oct", "(O)", o); + Py_DECREF(mod); + return res; +} + +static PyObject * +_PyNumber_Hex(PyObject *o) { + PyObject *res; + PyObject *mod = PyImport_ImportModule("__builtin__"); + if (mod == NULL) { return NULL; } - pv2 = Py_TYPE(pv)->tp_as_number->nb_long(pv); - Py_DECREF(pv); - return pv2; + res = PyObject_CallMethod(mod, "hex", "(O)", o); + Py_DECREF(mod); + return res; } -static PyObject * +NPY_NO_EXPORT PyObject * array_oct(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_GETITEM(v, PyArray_DATA(v)); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_oct == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to oct"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_oct(pv); - Py_DECREF(pv); - return pv2; + return array_scalar_forward(v, &_PyNumber_Oct, " in ndarray.__oct__"); } -static PyObject * +NPY_NO_EXPORT PyObject * array_hex(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_GETITEM(v, PyArray_DATA(v)); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_hex == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to hex"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_hex(pv); - Py_DECREF(pv); - return pv2; + return array_scalar_forward(v, &_PyNumber_Hex, " in ndarray.__hex__"); } #endif diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/multiarray/numpyos.c index e6f414786..52dcbf3c8 100644 --- a/numpy/core/src/multiarray/numpyos.c +++ b/numpy/core/src/multiarray/numpyos.c @@ -17,7 +17,7 @@ #include <stdlib.h> #ifdef HAVE_XLOCALE_H /* - * the defines from xlocale.h are included in locale.h on some sytems; + * the defines from xlocale.h are included in locale.h on some systems; * see gh-8367 */ #include <xlocale.h> diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c index 88f660118..4b018b056 100644 --- a/numpy/core/src/multiarray/refcount.c +++ b/numpy/core/src/multiarray/refcount.c @@ -276,7 +276,9 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) } else { npy_intp i; - for (i = 0; i < dtype->elsize / sizeof(obj); i++) { + npy_intp nsize = dtype->elsize / sizeof(obj); + + for (i = 0; i < nsize; i++) { Py_XINCREF(obj); NPY_COPY_PYOBJECT_PTR(optr, &obj); optr += sizeof(obj); diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 306944d11..ee83206de 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -238,44 +238,34 @@ gentype_@name@(PyObject *m1, PyObject *m2) /**end repeat**/ #endif +/* Get a nested slot, or NULL if absent */ +#define GET_NESTED_SLOT(type, group, slot) \ + ((type)->group == NULL ? 
NULL : (type)->group->slot) + static PyObject * gentype_multiply(PyObject *m1, PyObject *m2) { - npy_intp repeat; - /* * If the other object supports sequence repeat and not number multiply - * we should call sequence repeat to support e.g. list repeat by numpy - * scalars (they may be converted to ndarray otherwise). + * we fall back on the python builtin to invoke the sequence repeat, rather + * than promoting both arguments to ndarray. + * This covers a list repeat by numpy scalars. * A python defined class will always only have the nb_multiply slot and * some classes may have neither defined. For the latter we want need * to give the normal case a chance to convert the object to ndarray. * Probably no class has both defined, but if they do, prefer number. */ if (!PyArray_IsScalar(m1, Generic) && - ((Py_TYPE(m1)->tp_as_sequence != NULL) && - (Py_TYPE(m1)->tp_as_sequence->sq_repeat != NULL)) && - ((Py_TYPE(m1)->tp_as_number == NULL) || - (Py_TYPE(m1)->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m2 to an int and try sequence repeat */ - repeat = PyArray_PyIntAsIntp(m2); - if (error_converting(repeat)) { - return NULL; - } - /* Note that npy_intp is compatible to Py_Ssize_t */ - return PySequence_Repeat(m1, repeat); + GET_NESTED_SLOT(Py_TYPE(m1), tp_as_sequence, sq_repeat) != NULL && + GET_NESTED_SLOT(Py_TYPE(m1), tp_as_number, nb_multiply) == NULL) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; } if (!PyArray_IsScalar(m2, Generic) && - ((Py_TYPE(m2)->tp_as_sequence != NULL) && - (Py_TYPE(m2)->tp_as_sequence->sq_repeat != NULL)) && - ((Py_TYPE(m2)->tp_as_number == NULL) || - (Py_TYPE(m2)->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m1 to an int and try sequence repeat */ - repeat = PyArray_PyIntAsIntp(m1); - if (error_converting(repeat)) { - return NULL; - } - return PySequence_Repeat(m2, repeat); + GET_NESTED_SLOT(Py_TYPE(m2), tp_as_sequence, sq_repeat) != NULL && + GET_NESTED_SLOT(Py_TYPE(m2), tp_as_number, 
nb_multiply) == NULL) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; } /* All normal cases are handled by PyArray's multiply */ BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_multiply, gentype_multiply); @@ -443,15 +433,18 @@ format_@name@(@type@ val, npy_bool scientific, int pad_left, int pad_right, int exp_digits) { if (scientific) { - return Dragon4_Scientific_AnySize(&val, sizeof(@type@), 1, precision, - sign, trim, pad_left, exp_digits); + return Dragon4_Scientific_AnySize(&val, sizeof(@type@), + DigitMode_Unique, precision, + sign, trim, pad_left, exp_digits); } else { - return Dragon4_Positional_AnySize(&val, sizeof(@type@), 1, precision, - sign, trim, pad_left, pad_right); + return Dragon4_Positional_AnySize(&val, sizeof(@type@), + DigitMode_Unique, CutoffMode_TotalLength, precision, + sign, trim, pad_left, pad_right); } } + /**end repeat**/ /* @@ -492,10 +485,61 @@ static PyObject * } /**end repeat**/ + +/* + * Convert array of bytes to a string representation much like bytes.__repr__, + * but convert all bytes (including ASCII) to the `\x00` notation with + * uppercase hex codes (FF not ff). 
+ * + * Largely copied from _Py_strhex_impl in CPython implementation + */ +static NPY_INLINE PyObject * +_void_to_hex(const char* argbuf, const Py_ssize_t arglen, + const char *schars, const char *bprefix, const char *echars) +{ + PyObject *retval; + int extrachars, slen; + char *retbuf; + Py_ssize_t i, j; + char const *hexdigits = "0123456789ABCDEF"; + + extrachars = strlen(schars) + strlen(echars); + slen = extrachars + arglen*(2 + strlen(bprefix)); + + if (arglen > (PY_SSIZE_T_MAX / 2) - extrachars) { + return PyErr_NoMemory(); + } + + retbuf = (char *)PyMem_Malloc(slen); + if (!retbuf) { + return PyErr_NoMemory(); + } + + memcpy(retbuf, schars, strlen(schars)); + j = strlen(schars); + + for (i = 0; i < arglen; i++) { + unsigned char c; + memcpy(&retbuf[j], bprefix, strlen(bprefix)); + j += strlen(bprefix); + c = (argbuf[i] >> 4) & 0xf; + retbuf[j++] = hexdigits[c]; + c = argbuf[i] & 0xf; + retbuf[j++] = hexdigits[c]; + } + memcpy(&retbuf[j], echars, strlen(echars)); + + retval = PyUString_FromStringAndSize(retbuf, slen); + PyMem_Free(retbuf); + + return retval; +} + static PyObject * -voidtype_str(PyObject *self) +voidtype_repr(PyObject *self) { - if (PyDataType_HASFIELDS(((PyVoidScalarObject*)self)->descr)) { + PyVoidScalarObject *s = (PyVoidScalarObject*) self; + if (PyDataType_HASFIELDS(s->descr)) { static PyObject *reprfunc = NULL; npy_cache_import("numpy.core.arrayprint", @@ -506,18 +550,25 @@ voidtype_str(PyObject *self) return PyObject_CallFunction(reprfunc, "O", self); } - else { - PyObject *item, *item_str; + return _void_to_hex(s->obval, s->descr->elsize, "void(b'", "\\x", "')"); +} - item = gentype_generic_method(self, NULL, NULL, "item"); - if (item == NULL) { +static PyObject * +voidtype_str(PyObject *self) +{ + PyVoidScalarObject *s = (PyVoidScalarObject*) self; + if (PyDataType_HASFIELDS(s->descr)) { + static PyObject *reprfunc = NULL; + + npy_cache_import("numpy.core.arrayprint", + "_void_scalar_repr", &reprfunc); + if (reprfunc == NULL) { 
return NULL; } - item_str = PyObject_Str(item); - Py_DECREF(item); - return item_str; + return PyObject_CallFunction(reprfunc, "O", self); } + return _void_to_hex(s->obval, s->descr->elsize, "b'", "\\x", "'"); } static PyObject * @@ -719,6 +770,173 @@ timedeltatype_str(PyObject *self) * These functions will return NULL if PyString creation fails. */ + +/* + * *** BEGIN LEGACY PRINTING MODE CODE *** + * + * This code is legacy code needed to reproduce the printing behavior of + * scalars in numpy 1.13. One day we hope to remove it. + */ + +/* determines if legacy mode is enabled, global set in multiarraymodule.c */ +extern int npy_legacy_print_mode; + +#define HALFPREC_REPR 5 +#define HALFPREC_STR 5 +#define FLOATPREC_REPR 8 +#define FLOATPREC_STR 6 +#define DOUBLEPREC_REPR 17 +#define DOUBLEPREC_STR 12 +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE +#define LONGDOUBLEPREC_REPR DOUBLEPREC_REPR +#define LONGDOUBLEPREC_STR DOUBLEPREC_STR +#else /* More than probably needed on Intel FP */ +#define LONGDOUBLEPREC_REPR 20 +#define LONGDOUBLEPREC_STR 12 +#endif + +/**begin repeat + * #kind = str, repr# + * #KIND = STR, REPR# + */ + +/**begin repeat1 + * #name = cfloat, cdouble, clongdouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #type = npy_cfloat, npy_cdouble, npy_clongdouble# + * #suff = f, d, l# + */ + +#define _FMT1 "%%.%i" NPY_@NAME@_FMT +#define _FMT2 "%%+.%i" NPY_@NAME@_FMT + +static PyObject* +legacy_@name@_format@kind@(@type@ val) +{ + /* XXX: Find a correct size here for format string */ + char format[64], buf[100], *res; + + /* + * Ideally, we should handle this nan/inf stuff in NumpyOS_ascii_format* + */ + if (val.real == 0.0 && npy_signbit(val.real) == 0) { + PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(buf, sizeof(buf) - 1, format, val.imag, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + if (!npy_isfinite(val.imag)) { + strncat(buf, 
"*", 1); + } + strncat(buf, "j", 1); + } + else { + char re[64], im[64]; + if (npy_isfinite(val.real)) { + PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(re, sizeof(re), format, + val.real, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + } + else { + if (npy_isnan(val.real)) { + strcpy(re, "nan"); + } + else if (val.real > 0){ + strcpy(re, "inf"); + } + else { + strcpy(re, "-inf"); + } + } + + + if (npy_isfinite(val.imag)) { + PyOS_snprintf(format, sizeof(format), _FMT2, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(im, sizeof(im), format, + val.imag, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + } + else { + if (npy_isnan(val.imag)) { + strcpy(im, "+nan"); + } + else if (val.imag > 0){ + strcpy(im, "+inf"); + } + else { + strcpy(im, "-inf"); + } + if (!npy_isfinite(val.imag)) { + strncat(im, "*", 1); + } + } + PyOS_snprintf(buf, sizeof(buf), "(%s%sj)", re, im); + } + + return PyUString_FromString(buf); +} + +#undef _FMT1 +#undef _FMT2 + +/**end repeat1**/ + +/**begin repeat1 + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #suff = f, d, l# + */ + +#define _FMT1 "%%.%i" NPY_@NAME@_FMT + +static PyObject * +legacy_@name@_format@kind@(npy_@name@ val){ + /* XXX: Find a correct size here for format string */ + char format[64], buf[100], *res; + size_t i, cnt; + + PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(buf, sizeof(buf), format, val, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + + /* If nothing but digits after sign, append ".0" */ + cnt = strlen(buf); + for (i = (buf[0] == '-') ? 
1 : 0; i < cnt; ++i) { + if (!isdigit(Py_CHARMASK(buf[i]))) { + break; + } + } + if (i == cnt && sizeof(buf) >= cnt + 3) { + strcpy(&buf[cnt],".0"); + } + + return PyUString_FromString(buf); +} + +#undef _FMT1 + +/**end repeat1**/ + +/**end repeat**/ + + +/* + * *** END LEGACY PRINTING MODE CODE *** + */ + + /**begin repeat * #kind = str, repr# */ @@ -729,11 +947,20 @@ timedeltatype_str(PyObject *self) * #NAME = FLOAT, DOUBLE, LONGDOUBLE# */ +/* helper function choose scientific of fractional output, based on a cutoff */ static PyObject * @name@type_@kind@_either(npy_@name@ val, TrimMode trim_pos, TrimMode trim_sci, npy_bool sign) { - if (val < (npy_@name@)1.e16L) { + npy_@name@ absval; + + if (npy_legacy_print_mode == 113) { + return legacy_@name@_format@kind@(val); + } + + absval = val < 0 ? -val : val; + + if (absval == 0 || (absval < 1.e16L && absval >= 1.e-4L) ) { return format_@name@(val, 0, -1, sign, trim_pos, -1, -1, -1); } return format_@name@(val, 1, -1, sign, trim_sci, -1, -1, -1); @@ -753,11 +980,16 @@ c@name@type_@kind@(PyObject *self) npy_c@name@ val = ((PyC@Name@ScalarObject *)self)->obval; TrimMode trim = TrimMode_DptZeros; + if (npy_legacy_print_mode == 113) { + return legacy_c@name@_format@kind@(val); + } + if (val.real == 0.0 && npy_signbit(val.real) == 0) { istr = @name@type_@kind@_either(val.imag, trim, trim, 0); if (istr == NULL) { return NULL; } + PyUString_ConcatAndDel(&istr, PyUString_FromString("j")); return istr; } @@ -805,16 +1037,27 @@ c@name@type_@kind@(PyObject *self) /**end repeat1**/ + static PyObject * halftype_@kind@(PyObject *self) { npy_half val = ((PyHalfScalarObject *)self)->obval; - if (npy_half_to_double(val) < 1.e16) { + float floatval = npy_half_to_float(val); + float absval; + + if (npy_legacy_print_mode == 113) { + return legacy_float_format@kind@(floatval); + } + + absval = floatval < 0 ? 
-floatval : floatval; + + if (absval == 0 || (absval < 1.e16 && absval >= 1.e-4) ) { return format_half(val, 0, -1, 0, TrimMode_LeaveOneZero, -1, -1, -1); } return format_half(val, 1, -1, 0, TrimMode_DptZeros, -1, -1, -1); } + /**end repeat**/ /**begin repeat @@ -1090,7 +1333,8 @@ gentype_struct_get(PyObject *self) inter->two = 2; inter->nd = 0; inter->flags = PyArray_FLAGS(arr); - inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_OWNDATA); + inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_WRITEBACKIFCOPY | + NPY_ARRAY_OWNDATA); inter->flags |= NPY_ARRAY_NOTSWAPPED; inter->typekind = PyArray_DESCR(arr)->kind; inter->itemsize = PyArray_DESCR(arr)->elsize; @@ -2122,11 +2366,7 @@ voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val) return -1; } -#if defined(NPY_PY3K) - if (PyUString_Check(ind)) { -#else - if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { -#endif + if (PyBaseString_Check(ind)) { /* * Much like in voidtype_setfield, we cannot simply use ndarray's * __setitem__ since assignment to void scalars should not broadcast @@ -2817,7 +3057,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) if (PyErr_Occurred() || (memu > NPY_MAX_INT)) { PyErr_Clear(); PyErr_Format(PyExc_OverflowError, - "size cannot be greater than %d", + "size must be non-negative and not greater than %d", (int) NPY_MAX_INT); return NULL; } @@ -3996,7 +4236,7 @@ initialize_numeric_types(void) PyVoidArrType_Type.tp_getset = voidtype_getsets; PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping; PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; - PyVoidArrType_Type.tp_repr = voidtype_str; + PyVoidArrType_Type.tp_repr = voidtype_repr; PyVoidArrType_Type.tp_str = voidtype_str; PyIntegerArrType_Type.tp_getset = inttype_getsets; diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 40925d8b9..29c122bd3 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c 
@@ -17,6 +17,7 @@ #include "shape.h" +#include "multiarraymodule.h" /* for interned strings */ #include "templ_common.h" /* for npy_mul_with_overflow_intp */ #include "common.h" /* for convert_shape_to_string */ #include "alloc.h" @@ -339,7 +340,9 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) } else { npy_intp i; - for (i = 0; i < dtype->elsize / sizeof(zero); i++) { + npy_intp nsize = dtype->elsize / sizeof(zero); + + for (i = 0; i < nsize; i++) { Py_INCREF(zero); NPY_COPY_PYOBJECT_PTR(optr, &zero); optr += sizeof(zero); @@ -646,20 +649,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) int n = PyArray_NDIM(ap); int i; - if (a1 < 0) { - a1 += n; - } - if (a2 < 0) { - a2 += n; - } - if ((a1 < 0) || (a1 >= n)) { - PyErr_SetString(PyExc_ValueError, - "bad axis1 argument to swapaxes"); + if (check_and_adjust_axis_msg(&a1, n, npy_ma_str_axis1) < 0) { return NULL; } - if ((a2 < 0) || (a2 >= n)) { - PyErr_SetString(PyExc_ValueError, - "bad axis2 argument to swapaxes"); + if (check_and_adjust_axis_msg(&a2, n, npy_ma_str_axis2) < 0) { return NULL; } diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c index bb94eb9f3..646d15cdb 100644 --- a/numpy/core/src/multiarray/strfuncs.c +++ b/numpy/core/src/multiarray/strfuncs.c @@ -225,3 +225,35 @@ array_format(PyArrayObject *self, PyObject *args) ); } } + +#ifndef NPY_PY3K + +NPY_NO_EXPORT PyObject * +array_unicode(PyArrayObject *self) +{ + PyObject *uni; + + if (PyArray_NDIM(self) == 0) { + PyObject *item = PyArray_ToScalar(PyArray_DATA(self), self); + if (item == NULL){ + return NULL; + } + + /* defer to invoking `unicode` on the scalar */ + uni = PyObject_CallFunctionObjArgs( + (PyObject *)&PyUnicode_Type, item, NULL); + Py_DECREF(item); + } + else { + /* Do what unicode(self) would normally do */ + PyObject *str = PyObject_Str((PyObject *)self); + if (str == NULL){ + return NULL; + } + uni = PyUnicode_FromObject(str); + Py_DECREF(str); + } + return uni; +} + +#endif 
diff --git a/numpy/core/src/multiarray/strfuncs.h b/numpy/core/src/multiarray/strfuncs.h index 5dd661a20..7e869d926 100644 --- a/numpy/core/src/multiarray/strfuncs.h +++ b/numpy/core/src/multiarray/strfuncs.h @@ -13,4 +13,9 @@ array_str(PyArrayObject *self); NPY_NO_EXPORT PyObject * array_format(PyArrayObject *self, PyObject *args); +#ifndef NPY_PY3K + NPY_NO_EXPORT PyObject * + array_unicode(PyArrayObject *self); +#endif + #endif diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c index b8fa4c0ae..3d2f976f2 100644 --- a/numpy/core/src/multiarray/temp_elide.c +++ b/numpy/core/src/multiarray/temp_elide.c @@ -7,6 +7,7 @@ #include "numpy/arrayobject.h" #define NPY_NUMBER_MAX(a, b) ((a) > (b) ? (a) : (b)) +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) /* * Functions used to try to avoid/elide temporaries in python expressions @@ -181,6 +182,7 @@ check_callers(int * cannot) Dl_info info; int in_python = 0; int in_multiarray = 0; + #if NPY_ELIDE_DEBUG >= 2 dladdr(buffer[i], &info); printf("%s(%p) %s(%p)\n", info.dli_fname, info.dli_fbase, @@ -242,14 +244,14 @@ check_callers(int * cannot) } if (info.dli_sname && strcmp(info.dli_sname, PYFRAMEEVAL_FUNC) == 0) { - if (n_pyeval < sizeof(pyeval_addr) / sizeof(pyeval_addr[0])) { + if (n_pyeval < (npy_intp)ARRAY_SIZE(pyeval_addr)) { /* store address to not have to dladdr it again */ pyeval_addr[n_pyeval++] = buffer[i]; } ok = 1; break; } - else if (n_py_addr < sizeof(py_addr) / sizeof(py_addr[0])) { + else if (n_py_addr < (npy_intp)ARRAY_SIZE(py_addr)) { /* store other py function to not have to dladdr it again */ py_addr[n_py_addr++] = buffer[i]; } @@ -287,6 +289,7 @@ can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot) !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) || + PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) || PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) { return 0; } diff --git 
a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c new file mode 100644 index 000000000..f0af76809 --- /dev/null +++ b/numpy/core/src/multiarray/typeinfo.c @@ -0,0 +1,114 @@ +/* + * Provides namedtuples for numpy.core.multiarray.typeinfo + * Unfortunately, we need two different types to cover the cases where min/max + * do and do not appear in the tuple. + */ +#define PY_SSIZE_T_CLEAN +#include <Python.h> + +/* In python 2, this is not exported from Python.h */ +#include <structseq.h> + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#include "npy_pycompat.h" + + +PyTypeObject PyArray_typeinfoType; +PyTypeObject PyArray_typeinforangedType; + +static PyStructSequence_Field typeinfo_fields[] = { + {"char", "The character used to represent the type"}, + {"num", "The numeric id assigned to the type"}, + {"bits", "The number of bits in the type"}, + {"alignment", "The alignment of the type in bytes"}, + {"type", "The python type object this info is about"}, + {NULL, NULL,} +}; + +static PyStructSequence_Field typeinforanged_fields[] = { + {"char", "The character used to represent the type"}, + {"num", "The numeric id assigned to the type"}, + {"bits", "The number of bits in the type"}, + {"alignment", "The alignment of the type in bytes"}, + {"max", "The maximum value of this type"}, + {"min", "The minimum value of this type"}, + {"type", "The python type object this info is about"}, + {NULL, NULL,} +}; + +static PyStructSequence_Desc typeinfo_desc = { + "numpy.core.multiarray.typeinfo", /* name */ + "Information about a scalar numpy type", /* doc */ + typeinfo_fields, /* fields */ + 5, /* n_in_sequence */ +}; + +static PyStructSequence_Desc typeinforanged_desc = { + "numpy.core.multiarray.typeinforanged", /* name */ + "Information about a scalar numpy type with a range", /* doc */ + typeinforanged_fields, /* fields */ + 7, /* n_in_sequence */ +}; + +PyObject * +PyArray_typeinfo( + char typechar, int typenum, int 
nbits, int align, + PyTypeObject *type_obj) +{ + PyObject *entry = PyStructSequence_New(&PyArray_typeinfoType); + if (entry == NULL) + return NULL; +#if defined(NPY_PY3K) + PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar)); +#else + PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar)); +#endif + PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum)); + PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits)); + PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align)); + PyStructSequence_SET_ITEM(entry, 4, Py_BuildValue("O", (PyObject *) type_obj)); + + if (PyErr_Occurred()) { + Py_DECREF(entry); + return NULL; + } + + return entry; +} + +PyObject * +PyArray_typeinforanged( + char typechar, int typenum, int nbits, int align, + PyObject *max, PyObject *min, PyTypeObject *type_obj) +{ + PyObject *entry = PyStructSequence_New(&PyArray_typeinforangedType); + if (entry == NULL) + return NULL; +#if defined(NPY_PY3K) + PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar)); +#else + PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar)); +#endif + PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum)); + PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits)); + PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align)); + PyStructSequence_SET_ITEM(entry, 4, max); + PyStructSequence_SET_ITEM(entry, 5, min); + PyStructSequence_SET_ITEM(entry, 6, Py_BuildValue("O", (PyObject *) type_obj)); + + if (PyErr_Occurred()) { + Py_DECREF(entry); + return NULL; + } + + return entry; +} + +void typeinfo_init_structsequences(void) +{ + PyStructSequence_InitType( + &PyArray_typeinfoType, &typeinfo_desc); + PyStructSequence_InitType( + &PyArray_typeinforangedType, &typeinforanged_desc); +} diff --git a/numpy/core/src/multiarray/typeinfo.h b/numpy/core/src/multiarray/typeinfo.h new file mode 100644 index 000000000..5899c2093 --- /dev/null +++ b/numpy/core/src/multiarray/typeinfo.h @@ -0,0 +1,19 @@ 
+#ifndef _NPY_PRIVATE_TYPEINFO_H_ +#define _NPY_PRIVATE_TYPEINFO_H_ + +void typeinfo_init_structsequences(void); + +extern PyTypeObject PyArray_typeinfoType; +extern PyTypeObject PyArray_typeinforangedType; + +PyObject * +PyArray_typeinfo( + char typechar, int typenum, int nbits, int align, + PyTypeObject *type_obj); + +PyObject * +PyArray_typeinforanged( + char typechar, int typenum, int nbits, int align, + PyObject *max, PyObject *min, PyTypeObject *type_obj); + +#endif diff --git a/numpy/core/src/multiarray/vdot.c b/numpy/core/src/multiarray/vdot.c index 4be85672e..424a21710 100644 --- a/numpy/core/src/multiarray/vdot.c +++ b/numpy/core/src/multiarray/vdot.c @@ -1,4 +1,5 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #include <Python.h> #include "common.h" diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c index 951768256..c2bd28d60 100644 --- a/numpy/core/src/npymath/halffloat.c +++ b/numpy/core/src/npymath/halffloat.c @@ -281,7 +281,7 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) if (f_exp <= 0x38000000u) { /* * Signed zeros, subnormal floats, and floats with small - * exponents all convert to signed zero halfs. + * exponents all convert to signed zero half-floats. */ if (f_exp < 0x33000000u) { #if NPY_HALF_GENERATE_UNDERFLOW @@ -396,7 +396,7 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) if (d_exp <= 0x3f00000000000000ULL) { /* * Signed zeros, subnormal floats, and floats with small - * exponents all convert to signed zero halfs. + * exponents all convert to signed zero half-floats. 
*/ if (d_exp < 0x3e60000000000000ULL) { #if NPY_HALF_GENERATE_UNDERFLOW diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src index fb31e8e6a..ea784ec5b 100644 --- a/numpy/core/src/npymath/npy_math_complex.c.src +++ b/numpy/core/src/npymath/npy_math_complex.c.src @@ -35,11 +35,17 @@ #include "npy_math_private.h" #include <numpy/utils.h> - -#define raise_inexact() do { volatile npy_float junk = 1 + tiny; } while(0) +/* + * Hack inherited from BSD, the intent is to set the FPU inexact + * flag in an efficient way. The flag is IEEE specific. See + * https://github.com/freebsd/freebsd/blob/4c6378299/lib/msun/src/catrig.c#L42 + */ +#define raise_inexact() do { \ + volatile npy_float NPY_UNUSED(junk) = 1 + tiny; \ +} while (0) -static __COMP_NPY_UNUSED npy_float tiny = 3.9443045e-31f; +static const volatile npy_float tiny = 3.9443045e-31f; /**begin repeat diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src index 093e51b2d..f2e5229b0 100644 --- a/numpy/core/src/npymath/npy_math_internal.h.src +++ b/numpy/core/src/npymath/npy_math_internal.h.src @@ -678,3 +678,41 @@ npy_divmod@c@(@type@ a, @type@ b, @type@ *modulus) #undef DEG2RAD /**end repeat**/ + +/**begin repeat + * + * #type = npy_uint, npy_ulong, npy_ulonglong# + * #c = u,ul,ull# + */ +NPY_INPLACE @type@ +npy_gcd@c@(@type@ a, @type@ b) +{ + @type@ c; + while (a != 0) { + c = a; + a = b%a; + b = c; + } + return b; +} + +NPY_INPLACE @type@ +npy_lcm@c@(@type@ a, @type@ b) +{ + @type@ gcd = npy_gcd@c@(a, b); + return gcd == 0 ? 0 : a / gcd * b; +} +/**end repeat**/ + +/**begin repeat + * + * #type = (npy_int, npy_long, npy_longlong)*2# + * #c = (,l,ll)*2# + * #func=gcd*3,lcm*3# + */ +NPY_INPLACE @type@ +npy_@func@@c@(@type@ a, @type@ b) +{ + return npy_@func@u@c@(a < 0 ? -a : a, b < 0 ? 
-b : b); +} +/**end repeat**/ diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src index ff0e8a149..49a2c4906 100644 --- a/numpy/core/src/npysort/quicksort.c.src +++ b/numpy/core/src/npysort/quicksort.c.src @@ -482,7 +482,7 @@ npy_quicksort(void *start, npy_intp num, void *varr) pj = pr - elsize; GENERIC_SWAP(pm, pj, elsize); /* - * Generic comparisons may be buggy, so don't rely on the sentinals + * Generic comparisons may be buggy, so don't rely on the sentinels * to keep the pointers from going out of bounds. */ for (;;) { diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/private/lowlevel_strided_loops.h index e785c6796..094612b7d 100644 --- a/numpy/core/src/private/lowlevel_strided_loops.h +++ b/numpy/core/src/private/lowlevel_strided_loops.h @@ -414,20 +414,24 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape, char **out_dataC, npy_intp *out_stridesC); /* - * Return number of elements that must be peeled from - * the start of 'addr' with 'nvals' elements of size 'esize' - * in order to reach 'alignment'. - * alignment must be a power of two. - * see npy_blocked_end for an example + * Return number of elements that must be peeled from the start of 'addr' with + * 'nvals' elements of size 'esize' in order to reach blockable alignment. + * The required alignment in bytes is passed as the 'alignment' argument and + * must be a power of two. This function is used to prepare an array for + * blocking. See the 'npy_blocked_end' function documentation below for an + * example of how this function is used. */ -static NPY_INLINE npy_uintp +static NPY_INLINE npy_intp npy_aligned_block_offset(const void * addr, const npy_uintp esize, const npy_uintp alignment, const npy_uintp nvals) { - const npy_uintp offset = (npy_uintp)addr & (alignment - 1); - npy_uintp peel = offset ? (alignment - offset) / esize : 0; - peel = nvals < peel ? 
nvals : peel; - return peel; + npy_uintp offset, peel; + + offset = (npy_uintp)addr & (alignment - 1); + peel = offset ? (alignment - offset) / esize : 0; + peel = (peel <= nvals) ? peel : nvals; + assert(peel <= NPY_MAX_INTP); + return (npy_intp)peel; } /* @@ -450,11 +454,16 @@ npy_aligned_block_offset(const void * addr, const npy_uintp esize, * for(; i < n; i++) * <scalar-op> */ -static NPY_INLINE npy_uintp -npy_blocked_end(const npy_uintp offset, const npy_uintp esize, +static NPY_INLINE npy_intp +npy_blocked_end(const npy_uintp peel, const npy_uintp esize, const npy_uintp vsz, const npy_uintp nvals) { - return nvals - offset - (nvals - offset) % (vsz / esize); + npy_uintp ndiff = nvals - peel; + npy_uintp res = (ndiff - ndiff % (vsz / esize)); + + assert(nvals >= peel); + assert(res <= NPY_MAX_INTP); + return (npy_intp)(res); } diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/private/mem_overlap.c index 2145791e1..21db1893b 100644 --- a/numpy/core/src/private/mem_overlap.c +++ b/numpy/core/src/private/mem_overlap.c @@ -415,7 +415,8 @@ diophantine_dfs(unsigned int n, x[0] = x1 + c1*t_l; x[1] = x2 - c2*t_l; if (require_ub_nontrivial) { - int j, is_ub_trivial; + unsigned int j; + int is_ub_trivial; is_ub_trivial = 1; for (j = 0; j < n; ++j) { @@ -711,7 +712,7 @@ static int strides_to_terms(PyArrayObject *arr, diophantine_term_t *terms, unsigned int *nterms, int skip_empty) { - unsigned int i; + int i; for (i = 0; i < PyArray_NDIM(arr); ++i) { if (skip_empty) { @@ -756,9 +757,11 @@ solve_may_share_memory(PyArrayObject *a, PyArrayObject *b, Py_ssize_t max_work) { npy_int64 rhs; - diophantine_term_t terms[2*NPY_MAXDIMS+2]; - npy_uintp start1 = 0, start2 = 0, end1 = 0, end2 = 0, size1 = 0, size2 = 0; - npy_int64 x[2*NPY_MAXDIMS+2]; + diophantine_term_t terms[2*NPY_MAXDIMS + 2]; + npy_uintp start1 = 0, end1 = 0, size1 = 0; + npy_uintp start2 = 0, end2 = 0, size2 = 0; + npy_uintp uintp_rhs; + npy_int64 x[2*NPY_MAXDIMS + 2]; unsigned int nterms; 
get_array_memory_extents(a, &start1, &end1, &size1); @@ -797,12 +800,12 @@ solve_may_share_memory(PyArrayObject *a, PyArrayObject *b, the extent check above.) */ - rhs = MIN(end2 - 1 - start1, end1 - 1 - start2); - - if (rhs != (npy_uintp)rhs) { + uintp_rhs = MIN(end2 - 1 - start1, end1 - 1 - start2); + if (uintp_rhs > NPY_MAX_INT64) { /* Integer overflow */ return MEM_OVERLAP_OVERFLOW; } + rhs = (npy_int64)uintp_rhs; nterms = 0; if (strides_to_terms(a, terms, &nterms, 1)) { @@ -845,8 +848,7 @@ solve_may_have_internal_overlap(PyArrayObject *a, Py_ssize_t max_work) { diophantine_term_t terms[NPY_MAXDIMS+1]; npy_int64 x[NPY_MAXDIMS+1]; - unsigned int nterms; - int i, j; + unsigned int i, j, nterms; if (PyArray_ISCONTIGUOUS(a)) { /* Quick case */ diff --git a/numpy/core/src/private/npy_binsearch.h.src b/numpy/core/src/private/npy_binsearch.h.src index 3b2c59487..ce3b34b0e 100644 --- a/numpy/core/src/private/npy_binsearch.h.src +++ b/numpy/core/src/private/npy_binsearch.h.src @@ -5,6 +5,8 @@ #include <numpy/npy_common.h> #include <numpy/ndarraytypes.h> +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) + typedef void (PyArray_BinSearchFunc)(const char*, const char*, char*, npy_intp, npy_intp, npy_intp, npy_intp, npy_intp, @@ -16,15 +18,15 @@ typedef int (PyArray_ArgBinSearchFunc)(const char*, const char*, npy_intp, npy_intp, npy_intp, PyArrayObject*); -struct binsearch_map { - enum NPY_TYPES typenum; +typedef struct { + int typenum; PyArray_BinSearchFunc *binsearch[NPY_NSEARCHSIDES]; -}; +} binsearch_map; -struct argbinsearch_map { - enum NPY_TYPES typenum; +typedef struct { + int typenum; PyArray_ArgBinSearchFunc *argbinsearch[NPY_NSEARCHSIDES]; -}; +} argbinsearch_map; /**begin repeat * @@ -72,7 +74,7 @@ npy_argbinsearch_@side@(const char *arr, const char *key, * #Arg = , Arg# */ -static struct @arg@binsearch_map _@arg@binsearch_map[] = { +static @arg@binsearch_map _@arg@binsearch_map[] = { /* If adding new types, make sure to keep them ordered by type num */ /**begin 
repeat1 * @@ -100,10 +102,9 @@ static PyArray_@Arg@BinSearchFunc *gen@arg@binsearch_map[] = { static NPY_INLINE PyArray_@Arg@BinSearchFunc* get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) { - static npy_intp num_funcs = sizeof(_@arg@binsearch_map) / - sizeof(_@arg@binsearch_map[0]); + npy_intp nfuncs = ARRAY_SIZE(_@arg@binsearch_map); npy_intp min_idx = 0; - npy_intp max_idx = num_funcs; + npy_intp max_idx = nfuncs; int type = dtype->type_num; if (side >= NPY_NSEARCHSIDES) { @@ -125,7 +126,8 @@ get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) } } - if (min_idx < num_funcs && _@arg@binsearch_map[min_idx].typenum == type) { + if (min_idx < nfuncs && + _@arg@binsearch_map[min_idx].typenum == type) { return _@arg@binsearch_map[min_idx].@arg@binsearch[side]; } @@ -137,4 +139,6 @@ get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) } /**end repeat**/ +#undef ARRAY_SIZE + #endif diff --git a/numpy/core/src/private/npy_longdouble.c b/numpy/core/src/private/npy_longdouble.c index e8cf7eaf9..508fbceac 100644 --- a/numpy/core/src/private/npy_longdouble.c +++ b/numpy/core/src/private/npy_longdouble.c @@ -13,7 +13,8 @@ } while (0) -/* Heavily derived from PyLong_FromDouble +/* + * Heavily derived from PyLong_FromDouble * Notably, we can't set the digits directly, so have to shift and or instead. */ NPY_VISIBILITY_HIDDEN PyObject * @@ -21,8 +22,10 @@ npy_longdouble_to_PyLong(npy_longdouble ldval) { PyObject *v; PyObject *l_chunk_size; - // number of bits to extract at a time. CPython uses 30, but that's because - // it's tied to the internal long representation + /* + * number of bits to extract at a time. 
CPython uses 30, but that's because + * it's tied to the internal long representation + */ const int chunk_size = NPY_BITSOF_LONGLONG; npy_longdouble frac; int i, ndig, expo, neg; diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/private/npy_partition.h.src index 07aecd4f8..a22cf911c 100644 --- a/numpy/core/src/private/npy_partition.h.src +++ b/numpy/core/src/private/npy_partition.h.src @@ -24,8 +24,9 @@ #include <numpy/npy_common.h> #include <numpy/ndarraytypes.h> -#define NPY_MAX_PIVOT_STACK 50 +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) +#define NPY_MAX_PIVOT_STACK 50 /**begin repeat * @@ -56,7 +57,7 @@ NPY_VISIBILITY_HIDDEN int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_i /**end repeat**/ typedef struct { - enum NPY_TYPES typenum; + int typenum; PyArray_PartitionFunc * part[NPY_NSELECTS]; PyArray_ArgPartitionFunc * argpart[NPY_NSELECTS]; } part_map; @@ -92,10 +93,12 @@ static NPY_INLINE PyArray_PartitionFunc * get_partition_func(int type, NPY_SELECTKIND which) { npy_intp i; + npy_intp ntypes = ARRAY_SIZE(_part_map); + if (which >= NPY_NSELECTS) { return NULL; } - for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) { + for (i = 0; i < ntypes; i++) { if (type == _part_map[i].typenum) { return _part_map[i].part[which]; } @@ -108,10 +111,12 @@ static NPY_INLINE PyArray_ArgPartitionFunc * get_argpartition_func(int type, NPY_SELECTKIND which) { npy_intp i; + npy_intp ntypes = ARRAY_SIZE(_part_map); + if (which >= NPY_NSELECTS) { return NULL; } - for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) { + for (i = 0; i < ntypes; i++) { if (type == _part_map[i].typenum) { return _part_map[i].argpart[which]; } @@ -119,4 +124,6 @@ get_argpartition_func(int type, NPY_SELECTKIND which) return NULL; } +#undef ARRAY_SIZE + #endif diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c index 344981622..e44036358 100644 --- a/numpy/core/src/umath/extobj.c +++ b/numpy/core/src/umath/extobj.c @@ -76,6 +76,11 
@@ _error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int * NPY_ALLOW_C_API_DEF + /* don't need C API for a simple ignore */ + if (method == UFUNC_ERR_IGNORE) { + return 0; + } + /* don't need C API for a simple print */ if (method == UFUNC_ERR_PRINT) { if (*first) { diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src index 5613c30ee..da2ab07f8 100644 --- a/numpy/core/src/umath/funcs.inc.src +++ b/numpy/core/src/umath/funcs.inc.src @@ -8,6 +8,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "npy_pycompat.h" +#include "npy_import.h" /* @@ -158,6 +159,73 @@ npy_ObjectLogicalNot(PyObject *i1) } } +static PyObject * +npy_ObjectGCD(PyObject *i1, PyObject *i2) +{ + PyObject *gcd = NULL; + + /* use math.gcd if available, and valid on the provided types */ +#if PY_VERSION_HEX >= 0x03050000 + { + static PyObject *math_gcd_func = NULL; + + npy_cache_import("math", "gcd", &math_gcd_func); + if (math_gcd_func == NULL) { + return NULL; + } + gcd = PyObject_CallFunction(math_gcd_func, "OO", i1, i2); + if (gcd != NULL) { + return gcd; + } + /* silence errors, and fall back on pure-python gcd */ + PyErr_Clear(); + } +#endif + + /* otherwise, use our internal one, written in python */ + { + static PyObject *internal_gcd_func = NULL; + + npy_cache_import("numpy.core._internal", "_gcd", &internal_gcd_func); + if (internal_gcd_func == NULL) { + return NULL; + } + gcd = PyObject_CallFunction(internal_gcd_func, "OO", i1, i2); + if (gcd == NULL) { + return NULL; + } + /* _gcd has some unusual behaviour regarding sign */ + return PyNumber_Absolute(gcd); + } +} + +static PyObject * +npy_ObjectLCM(PyObject *i1, PyObject *i2) +{ + /* lcm(a, b) = abs(a // gcd(a, b) * b) */ + + PyObject *gcd = npy_ObjectGCD(i1, i2); + PyObject *tmp; + if(gcd == NULL) { + return NULL; + } + /* Floor divide preserves integer types - we know the division will have + * no remainder + */ + tmp = PyNumber_FloorDivide(i1, gcd); + if(tmp == NULL) { + 
return NULL; + } + + tmp = PyNumber_Multiply(tmp, i2); + if(tmp == NULL) { + return NULL; + } + + /* even though we fix gcd to be positive, we need to do it again here */ + return PyNumber_Absolute(tmp); +} + /* ***************************************************************************** ** COMPLEX FUNCTIONS ** diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 789717555..c1dfe15da 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1041,6 +1041,7 @@ NPY_NO_EXPORT void /**begin repeat * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# + * #c = ,,,l,ll# */ NPY_NO_EXPORT NPY_GCC_OPT_3 void @@ -1132,11 +1133,26 @@ NPY_NO_EXPORT void } } +/**begin repeat1 + * #kind = gcd, lcm# + **/ +NPY_NO_EXPORT void +@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = npy_@kind@@c@(in1, in2); + } +} +/**end repeat1**/ + /**end repeat**/ /**begin repeat * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# + * #c = u,u,u,ul,ull# */ NPY_NO_EXPORT void @@ -1204,6 +1220,20 @@ NPY_NO_EXPORT void } } +/**begin repeat1 + * #kind = gcd, lcm# + **/ +NPY_NO_EXPORT void +@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = npy_@kind@@c@(in1, in2); + } +} +/**end repeat1**/ + /**end repeat**/ /* @@ -1623,11 +1653,12 @@ NPY_NO_EXPORT void * when updating also update similar complex floats summation */ static @type@ -pairwise_sum_@TYPE@(char *a, npy_uintp n, npy_intp stride) +pairwise_sum_@TYPE@(char *a, npy_intp n, npy_intp stride) { if (n < 8) { npy_intp i; @type@ res = 0.; + for (i = 0; i < n; i++) { res 
+= @trf@(*((@dtype@*)(a + i * stride))); } @@ -1653,7 +1684,7 @@ pairwise_sum_@TYPE@(char *a, npy_uintp n, npy_intp stride) for (i = 8; i < n - (n % 8); i += 8) { /* small blocksizes seems to mess with hardware prefetch */ - NPY_PREFETCH(a + (i + 512 / sizeof(@dtype@)) * stride, 0, 3); + NPY_PREFETCH(a + (i + 512/(npy_intp)sizeof(@dtype@))*stride, 0, 3); r[0] += @trf@(*((@dtype@ *)(a + (i + 0) * stride))); r[1] += @trf@(*((@dtype@ *)(a + (i + 1) * stride))); r[2] += @trf@(*((@dtype@ *)(a + (i + 2) * stride))); @@ -1676,7 +1707,8 @@ pairwise_sum_@TYPE@(char *a, npy_uintp n, npy_intp stride) } else { /* divide by two but avoid non-multiples of unroll factor */ - npy_uintp n2 = n / 2; + npy_intp n2 = n / 2; + n2 -= n2 % 8; return pairwise_sum_@TYPE@(a, n2, stride) + pairwise_sum_@TYPE@(a + n2 * stride, n - n2, stride); @@ -2395,12 +2427,13 @@ HALF_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN /* similar to pairwise sum of real floats */ static void -pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_uintp n, +pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_intp n, npy_intp stride) { assert(n % 2 == 0); if (n < 8) { npy_intp i; + *rr = 0.; *ri = 0.; for (i = 0; i < n; i += 2) { @@ -2429,7 +2462,7 @@ pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_uintp n, for (i = 8; i < n - (n % 8); i += 8) { /* small blocksizes seems to mess with hardware prefetch */ - NPY_PREFETCH(a + (i + 512 / sizeof(@ftype@)) * stride, 0, 3); + NPY_PREFETCH(a + (i + 512/(npy_intp)sizeof(@ftype@))*stride, 0, 3); r[0] += *((@ftype@ *)(a + (i + 0) * stride)); r[1] += *((@ftype@ *)(a + (i + 0) * stride + sizeof(@ftype@))); r[2] += *((@ftype@ *)(a + (i + 2) * stride)); @@ -2454,7 +2487,8 @@ pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_uintp n, else { /* divide by two but avoid non-multiples of unroll factor */ @ftype@ rr1, ri1, rr2, ri2; - npy_uintp n2 = n / 2; + npy_intp n2 = n / 2; + n2 -= n2 % 8; 
pairwise_sum_@TYPE@(&rr1, &ri1, a, n2, stride); pairwise_sum_@TYPE@(&rr2, &ri2, a + n2 * stride, n - n2, stride); diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index a978b03ee..a01ef1529 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -140,6 +140,12 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @S@@TYPE@_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@S@@TYPE@_gcd(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); + +NPY_NO_EXPORT void +@S@@TYPE@_lcm(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); + /**end repeat1**/ /**end repeat**/ diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 390b28c31..681d3fefa 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -189,7 +189,7 @@ conform_reduce_result(int ndim, npy_bool *axis_flags, } Py_INCREF(ret); - if (PyArray_SetUpdateIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) { + if (PyArray_SetWritebackIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) { Py_DECREF(ret); Py_DECREF(ret_copy); return NULL; @@ -249,29 +249,20 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out, } /* - * Checks that there are only zero or one dimensions selected in 'axis_flags', - * and raises an error about a non-reorderable reduction if not. 
+ * Count the number of dimensions selected in 'axis_flags' */ static int -check_nonreorderable_axes(int ndim, npy_bool *axis_flags, const char *funcname) +count_axes(int ndim, npy_bool *axis_flags) { - int idim, single_axis = 0; + int idim; + int naxes = 0; + for (idim = 0; idim < ndim; ++idim) { if (axis_flags[idim]) { - if (single_axis) { - PyErr_Format(PyExc_ValueError, - "reduction operation '%s' is not reorderable, " - "so only one axis may be specified", - funcname); - return -1; - } - else { - single_axis = 1; - } + naxes++; } } - - return 0; + return naxes; } /* @@ -296,11 +287,6 @@ check_nonreorderable_axes(int ndim, npy_bool *axis_flags, const char *funcname) * operand : The array being reduced. * axis_flags : An array of boolean flags, one for each axis of 'operand'. * When a flag is True, it indicates to reduce along that axis. - * reorderable : If True, the reduction being done is reorderable, which - * means specifying multiple axes of reduction at once is ok, - * and the reduction code may calculate the reduction in an - * arbitrary order. The calculation may be reordered because - * of cache behavior or multithreading requirements. * out_skip_first_count : This gets populated with the number of first-visit * elements that should be skipped during the * iteration loop. @@ -314,7 +300,7 @@ check_nonreorderable_axes(int ndim, npy_bool *axis_flags, const char *funcname) NPY_NO_EXPORT PyArrayObject * PyArray_InitializeReduceResult( PyArrayObject *result, PyArrayObject *operand, - npy_bool *axis_flags, int reorderable, + npy_bool *axis_flags, npy_intp *out_skip_first_count, const char *funcname) { npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS]; @@ -326,15 +312,6 @@ PyArray_InitializeReduceResult( /* Default to no skipping first-visit elements in the iteration */ *out_skip_first_count = 0; - /* - * If this reduction is non-reorderable, make sure there are - * only 0 or 1 axes in axis_flags. 
- */ - if (!reorderable && check_nonreorderable_axes(ndim, - axis_flags, funcname) < 0) { - return NULL; - } - /* Take a view into 'operand' which we can modify. */ op_view = (PyArrayObject *)PyArray_View(operand, NULL, &PyArray_Type); if (op_view == NULL) { @@ -411,8 +388,8 @@ PyArray_InitializeReduceResult( /* * This function executes all the standard NumPy reduction function - * boilerplate code, just calling assign_identity and the appropriate - * inner loop function where necessary. + * boilerplate code, just calling the appropriate inner loop function where + * necessary. * * operand : The array to be reduced. * out : NULL, or the array into which to place the result. @@ -432,11 +409,11 @@ PyArray_InitializeReduceResult( * with size one. * subok : If true, the result uses the subclass of operand, otherwise * it is always a base class ndarray. - * assign_identity : If NULL, PyArray_InitializeReduceResult is used, otherwise - * this function is called to initialize the result to + * identity : If Py_None, PyArray_InitializeReduceResult is used, otherwise + * this value is used to initialize the result to * the reduction's unit. * loop : The loop which does the reduction. - * data : Data which is passed to assign_identity and the inner loop. + * data : Data which is passed to the inner loop. * buffersize : Buffer size for the iterator. For the default, pass in 0. * funcname : The name of the reduction function, for error messages. 
* errormask : forwarded from _get_bufsize_errmask @@ -459,7 +436,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, npy_bool *axis_flags, int reorderable, int keepdims, int subok, - PyArray_AssignReduceIdentityFunc *assign_identity, + PyObject *identity, PyArray_ReduceLoopFunc *loop, void *data, npy_intp buffersize, const char *funcname, int errormask) @@ -473,6 +450,16 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, PyArray_Descr *op_dtypes[2]; npy_uint32 flags, op_flags[2]; + /* More than one axis means multiple orders are possible */ + if (!reorderable && count_axes(PyArray_NDIM(operand), axis_flags) > 1) { + PyErr_Format(PyExc_ValueError, + "reduction operation '%s' is not reorderable, " + "so at most one axis may be specified", + funcname); + return NULL; + } + + /* Validate that the parameters for future expansion are NULL */ if (wheremask != NULL) { PyErr_SetString(PyExc_RuntimeError, @@ -485,7 +472,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, * This either conforms 'out' to the ndim of 'operand', or allocates * a new array appropriate for this reduction. * - * A new array with UPDATEIFCOPY is allocated if operand and out have memory + * A new array with WRITEBACKIFCOPY is allocated if operand and out have memory * overlap. */ Py_INCREF(result_dtype); @@ -500,26 +487,16 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, * Initialize the result to the reduction unit if possible, * otherwise copy the initial values and get a view to the rest. */ - if (assign_identity != NULL) { - /* - * If this reduction is non-reorderable, make sure there are - * only 0 or 1 axes in axis_flags. 
- */ - if (!reorderable && check_nonreorderable_axes(PyArray_NDIM(operand), - axis_flags, funcname) < 0) { - goto fail; - } - - if (assign_identity(result, data) < 0) { + if (identity != Py_None) { + if (PyArray_FillWithScalar(result, identity) < 0) { goto fail; } op_view = operand; Py_INCREF(op_view); } else { - op_view = PyArray_InitializeReduceResult(result, operand, - axis_flags, reorderable, - &skip_first_count, funcname); + op_view = PyArray_InitializeReduceResult( + result, operand, axis_flags, &skip_first_count, funcname); if (op_view == NULL) { goto fail; } @@ -611,6 +588,7 @@ finish: } } else { + PyArray_ResolveWritebackIfCopy(result); /* prevent spurious warnings */ Py_DECREF(result); result = out; Py_INCREF(result); @@ -619,6 +597,7 @@ finish: return result; fail: + PyArray_ResolveWritebackIfCopy(result); /* prevent spurious warnings */ Py_XDECREF(result); Py_XDECREF(op_view); if (iter != NULL) { diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h index 7a55c5df5..dfaeabcbb 100644 --- a/numpy/core/src/umath/reduction.h +++ b/numpy/core/src/umath/reduction.h @@ -25,7 +25,7 @@ typedef int (PyArray_AssignReduceIdentityFunc)(PyArrayObject *result, * the loop, such as when the iternext() function never calls * a function which could raise a Python exception. * - * Ths skip_first_count parameter indicates how many elements need to be + * The skip_first_count parameter indicates how many elements need to be * skipped based on NpyIter_IsFirstVisit checks. This can only be positive * when the 'assign_identity' parameter was NULL when calling * PyArray_ReduceWrapper. @@ -109,8 +109,8 @@ typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter, /* * This function executes all the standard NumPy reduction function - * boilerplate code, just calling assign_identity and the appropriate - * inner loop function where necessary. + * boilerplate code, just calling the appropriate inner loop function where + * necessary. 
* * operand : The array to be reduced. * out : NULL, or the array into which to place the result. @@ -130,11 +130,11 @@ typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter, * with size one. * subok : If true, the result uses the subclass of operand, otherwise * it is always a base class ndarray. - * assign_identity : If NULL, PyArray_InitializeReduceResult is used, otherwise - * this function is called to initialize the result to + * identity : If Py_None, PyArray_InitializeReduceResult is used, otherwise + * this value is used to initialize the result to * the reduction's unit. * loop : The loop which does the reduction. - * data : Data which is passed to assign_identity and the inner loop. + * data : Data which is passed to the inner loop. * buffersize : Buffer size for the iterator. For the default, pass in 0. * funcname : The name of the reduction function, for error messages. * errormask : forwarded from _get_bufsize_errmask @@ -148,7 +148,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, npy_bool *axis_flags, int reorderable, int keepdims, int subok, - PyArray_AssignReduceIdentityFunc *assign_identity, + PyObject *identity, PyArray_ReduceLoopFunc *loop, void *data, npy_intp buffersize, const char *funcname, int errormask); diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 3b23151f1..7b424cc74 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -1424,7 +1424,11 @@ static PyObject * #ifndef NPY_PY3K /* Invoke long.__int__ to try to downcast */ - long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result); + { + PyObject *before_downcast = long_result; + long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result); + Py_DECREF(before_downcast); + } #endif return long_result; diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 8a799fe61..2241414ac 100644 --- a/numpy/core/src/umath/simd.inc.src +++ 
b/numpy/core/src/umath/simd.inc.src @@ -22,40 +22,38 @@ #include "numpy/npy_math.h" #ifdef NPY_HAVE_SSE2_INTRINSICS #include <emmintrin.h> +#if !defined(_MSC_VER) || _MSC_VER >= 1600 +#include <immintrin.h> +#else +#undef __AVX2__ +#undef __AVX512F__ +#endif #endif #include <assert.h> #include <stdlib.h> #include <float.h> #include <string.h> /* for memcpy */ -/* Figure out the right abs function for pointer addresses */ -static NPY_INLINE npy_intp -abs_intp(npy_intp x) +static NPY_INLINE npy_uintp +abs_ptrdiff(char *a, char *b) { -#if (NPY_SIZEOF_INTP <= NPY_SIZEOF_INT) - return abs(x); -#elif (NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG) - return labs(x); -#elif defined(_MSC_VER) && (_MSC_VER < 1600) - /* llabs is not available with Visual Studio 2008 */ - return x > 0 ? x : -x; -#else - return llabs(x); -#endif + return (a > b) ? (a - b) : (b - a); } + /* * stride is equal to element size and input and destination are equal or - * don't overlap within one register + * don't overlap within one register. The check of the steps against + * esize also guarantees that steps are >= 0. 
*/ #define IS_BLOCKABLE_UNARY(esize, vsize) \ (steps[0] == (esize) && steps[0] == steps[1] && \ (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \ - ((abs_intp(args[1] - args[0]) >= (vsize)) || \ - ((abs_intp(args[1] - args[0]) == 0)))) + ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ + ((abs_ptrdiff(args[1], args[0]) == 0)))) #define IS_BLOCKABLE_REDUCE(esize, vsize) \ - (steps[1] == (esize) && abs_intp(args[1] - args[0]) >= (vsize) && \ + (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ npy_is_aligned(args[1], (esize)) && \ npy_is_aligned(args[0], (esize))) @@ -63,26 +61,26 @@ abs_intp(npy_intp x) (steps[0] == steps[1] && steps[1] == steps[2] && steps[2] == (esize) && \ npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \ npy_is_aligned(args[0], (esize)) && \ - (abs_intp(args[2] - args[0]) >= (vsize) || \ - abs_intp(args[2] - args[0]) == 0) && \ - (abs_intp(args[2] - args[1]) >= (vsize) || \ - abs_intp(args[2] - args[1]) >= 0)) + (abs_ptrdiff(args[2], args[0]) >= (vsize) || \ + abs_ptrdiff(args[2], args[0]) == 0) && \ + (abs_ptrdiff(args[2], args[1]) >= (vsize) || \ + abs_ptrdiff(args[2], args[1]) >= 0)) #define IS_BLOCKABLE_BINARY_SCALAR1(esize, vsize) \ (steps[0] == 0 && steps[1] == steps[2] && steps[2] == (esize) && \ npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \ - ((abs_intp(args[2] - args[1]) >= (vsize)) || \ - (abs_intp(args[2] - args[1]) == 0)) && \ - abs_intp(args[2] - args[0]) >= (esize)) + ((abs_ptrdiff(args[2], args[1]) >= (vsize)) || \ + (abs_ptrdiff(args[2], args[1]) == 0)) && \ + abs_ptrdiff(args[2], args[0]) >= (esize)) #define IS_BLOCKABLE_BINARY_SCALAR2(esize, vsize) \ (steps[1] == 0 && steps[0] == steps[2] && steps[2] == (esize) && \ npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[0], (esize)) && \ - ((abs_intp(args[2] - args[0]) >= (vsize)) || \ - (abs_intp(args[2] - args[0]) == 0)) && \ - abs_intp(args[2] - args[1]) >= (esize)) + 
((abs_ptrdiff(args[2], args[0]) >= (vsize)) || \ + (abs_ptrdiff(args[2], args[0]) == 0)) && \ + abs_ptrdiff(args[2], args[1]) >= (esize)) -#undef abs_intp +#undef abs_ptrdiff #define IS_BLOCKABLE_BINARY_BOOL(esize, vsize) \ (steps[0] == (esize) && steps[0] == steps[1] && steps[2] == (1) && \ @@ -401,7 +399,11 @@ static NPY_INLINE npy_double sse2_horizontal_@VOP@___m128d(__m128d v) * #scalarf = npy_sqrtf, npy_sqrt# * #c = f, # * #vtype = __m128, __m128d# + * #vtype256 = __m256, __m256d# + * #vtype512 = __m512, __m512d# * #vpre = _mm, _mm# + * #vpre256 = _mm256, _mm256# + * #vpre512 = _mm512, _mm512# * #vsuf = ps, pd# * #vsufs = ss, sd# * #nan = NPY_NANF, NPY_NAN# @@ -420,6 +422,115 @@ static NPY_INLINE npy_double sse2_horizontal_@VOP@___m128d(__m128d v) static void sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) { +#ifdef __AVX512F__ + LOOP_BLOCK_ALIGN_VAR(op, @type@, 64) + op[i] = ip1[i] @OP@ ip2[i]; + /* lots of specializations, to squeeze out max performance */ + if (npy_is_aligned(&ip1[i], 64) && npy_is_aligned(&ip2[i], 64)) { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + } + else if (npy_is_aligned(&ip1[i], 64)) { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else if (npy_is_aligned(&ip2[i], 64)) { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + 
@vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + } +#elif __AVX2__ + LOOP_BLOCK_ALIGN_VAR(op, @type@, 32) + op[i] = ip1[i] @OP@ ip2[i]; + /* lots of specializations, to squeeze out max performance */ + if (npy_is_aligned(&ip1[i], 32) && npy_is_aligned(&ip2[i], 32)) { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + } + else if (npy_is_aligned(&ip1[i], 32)) { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else if (npy_is_aligned(&ip2[i], 32)) { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + if (ip1 == ip2) { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); + @vtype256@ c = 
@vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + } +#else LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) op[i] = ip1[i] @OP@ ip2[i]; /* lots of specializations, to squeeze out max performance */ @@ -473,6 +584,7 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } } } +#endif LOOP_BLOCKED_END { op[i] = ip1[i] @OP@ ip2[i]; } @@ -482,6 +594,45 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) static void sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) { +#ifdef __AVX512F__ + const @vtype512@ a = @vpre512@_set1_@vsuf@(ip1[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, 64) + op[i] = ip1[0] @OP@ ip2[i]; + if (npy_is_aligned(&ip2[i], 64)) { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + + +#elif __AVX2__ + const @vtype256@ a = @vpre256@_set1_@vsuf@(ip1[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, 32) + op[i] = ip1[0] @OP@ ip2[i]; + if (npy_is_aligned(&ip2[i], 32)) { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } +#else const @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]); LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) op[i] = ip1[0] @OP@ ip2[i]; @@ -499,6 +650,7 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i @vpre@_store_@vsuf@(&op[i], c); } } +#endif LOOP_BLOCKED_END { op[i] = ip1[0] @OP@ ip2[i]; } @@ -508,6 +660,44 @@ 
sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i static void sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) { +#ifdef __AVX512F__ + const @vtype512@ b = @vpre512@_set1_@vsuf@(ip2[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, 64) + op[i] = ip1[i] @OP@ ip2[0]; + if (npy_is_aligned(&ip1[i], 64)) { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 64) { + @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); + @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); + @vpre512@_store_@vsuf@(&op[i], c); + } + } + +#elif __AVX2__ + const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]); + LOOP_BLOCK_ALIGN_VAR(op, @type@, 32) + op[i] = ip1[i] @OP@ ip2[0]; + if (npy_is_aligned(&ip1[i], 32)) { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } + else { + LOOP_BLOCKED(@type@, 32) { + @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); + @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); + @vpre256@_store_@vsuf@(&op[i], c); + } + } +#else const @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]); LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) op[i] = ip1[i] @OP@ ip2[0]; @@ -525,6 +715,7 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i @vpre@_store_@vsuf@(&op[i], c); } } +#endif LOOP_BLOCKED_END { op[i] = ip1[i] @OP@ ip2[0]; } @@ -828,7 +1019,7 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) static void sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) { - const size_t stride = 16 / sizeof(@type@); + const npy_intp stride = 16 / (npy_intp)sizeof(@type@); LOOP_BLOCK_ALIGN_VAR(ip, @type@, 16) { *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? 
*op : ip[i]; } diff --git a/numpy/core/src/umath/test_rational.c.src b/numpy/core/src/umath/test_rational.c.src index 26c3d3799..ffc92b732 100644 --- a/numpy/core/src/umath/test_rational.c.src +++ b/numpy/core/src/umath/test_rational.c.src @@ -394,14 +394,14 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { return 0; } size = PyTuple_GET_SIZE(args); - if (size>2) { + if (size > 2) { PyErr_SetString(PyExc_TypeError, "expected rational or numerator and optional denominator"); return 0; } - x[0] = PyTuple_GET_ITEM(args,0); - x[1] = PyTuple_GET_ITEM(args,1); - if (size==1) { + + if (size == 1) { + x[0] = PyTuple_GET_ITEM(args, 0); if (PyRational_Check(x[0])) { Py_INCREF(x[0]); return x[0]; @@ -424,9 +424,11 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { return 0; } } - for (i=0;i<size;i++) { + + for (i=0; i<size; i++) { PyObject* y; int eq; + x[i] = PyTuple_GET_ITEM(args, i); n[i] = PyInt_AsLong(x[i]); if (error_converting(n[i])) { if (PyErr_ExceptionMatches(PyExc_TypeError)) { diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 16693b366..c67f60752 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -70,16 +70,6 @@ static int _does_loop_use_arrays(void *data); -static int -assign_reduce_identity_zero(PyArrayObject *result, void *data); - -static int -assign_reduce_identity_minusone(PyArrayObject *result, void *data); - -static int -assign_reduce_identity_one(PyArrayObject *result, void *data); - - /*UFUNC_API*/ NPY_NO_EXPORT int PyUFunc_getfperr(void) @@ -136,94 +126,130 @@ PyUFunc_clearfperr() } /* - * This function analyzes the input arguments - * and determines an appropriate __array_prepare__ function to call - * for the outputs. - * Assumes subok is already true if check_subok is false. 
- * - * If an output argument is provided, then it is prepped - * with its own __array_prepare__ not with the one determined by - * the input arguments. - * - * if the provided output argument is already an ndarray, - * the prepping function is None (which means no prepping will - * be done --- not even PyArray_Return). - * - * A NULL is placed in output_prep for outputs that - * should just have PyArray_Return called. + * This function analyzes the input arguments and determines an appropriate + * method (__array_prepare__ or __array_wrap__) function to call, taking it + * from the input with the highest priority. Return NULL if no argument + * defines the method. */ -static void -_find_array_prepare(PyObject *args, PyObject *kwds, - PyObject **output_prep, int nin, int nout, - int check_subok) +static PyObject* +_find_array_method(PyObject *args, int nin, PyObject *method_name) { - Py_ssize_t nargs; - int i; - int np = 0; - PyObject *with_prep[NPY_MAXARGS], *preps[NPY_MAXARGS]; - PyObject *obj, *prep = NULL; + int i, n_methods; + PyObject *obj; + PyObject *with_method[NPY_MAXARGS], *methods[NPY_MAXARGS]; + PyObject *method = NULL; - /* - * If a 'subok' parameter is passed and isn't True, don't wrap - * if check_subok is false it assumed subok in kwds keyword is True - */ - if (check_subok && kwds != NULL && - (obj = PyDict_GetItem(kwds, npy_um_str_subok)) != NULL) { - if (obj != Py_True) { - for (i = 0; i < nout; i++) { - output_prep[i] = NULL; - } - return; - } - } - - nargs = PyTuple_GET_SIZE(args); + n_methods = 0; for (i = 0; i < nin; i++) { obj = PyTuple_GET_ITEM(args, i); if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) { continue; } - prep = PyObject_GetAttr(obj, npy_um_str_array_prepare); - if (prep) { - if (PyCallable_Check(prep)) { - with_prep[np] = obj; - preps[np] = prep; - ++np; + method = PyObject_GetAttr(obj, method_name); + if (method) { + if (PyCallable_Check(method)) { + with_method[n_methods] = obj; + methods[n_methods] = method; + 
++n_methods; } else { - Py_DECREF(prep); - prep = NULL; + Py_DECREF(method); + method = NULL; } } else { PyErr_Clear(); } } - if (np > 0) { - /* If we have some preps defined, find the one of highest priority */ - prep = preps[0]; - if (np > 1) { - double maxpriority = PyArray_GetPriority(with_prep[0], - NPY_PRIORITY); - for (i = 1; i < np; ++i) { - double priority = PyArray_GetPriority(with_prep[i], - NPY_PRIORITY); + if (n_methods > 0) { + /* If we have some methods defined, find the one of highest priority */ + method = methods[0]; + if (n_methods > 1) { + double maxpriority = PyArray_GetPriority(with_method[0], + NPY_PRIORITY); + for (i = 1; i < n_methods; ++i) { + double priority = PyArray_GetPriority(with_method[i], + NPY_PRIORITY); if (priority > maxpriority) { maxpriority = priority; - Py_DECREF(prep); - prep = preps[i]; + Py_DECREF(method); + method = methods[i]; } else { - Py_DECREF(preps[i]); + Py_DECREF(methods[i]); } } } } + return method; +} + +/* + * Returns an incref'ed pointer to the proper __array_prepare__/__array_wrap__ + * method for a ufunc output argument, given the output argument `obj`, and the + * method chosen from the inputs `input_method`. + */ +static PyObject * +_get_output_array_method(PyObject *obj, PyObject *method, + PyObject *input_method) { + if (obj != Py_None) { + PyObject *ometh; + + if (PyArray_CheckExact(obj)) { + /* + * No need to wrap regular arrays - None signals to not call + * wrap/prepare at all + */ + Py_RETURN_NONE; + } + + ometh = PyObject_GetAttr(obj, method); + if (ometh == NULL) { + PyErr_Clear(); + } + else if (!PyCallable_Check(ometh)) { + Py_DECREF(ometh); + } + else { + /* Use the wrap/prepare method of the output if it's callable */ + return ometh; + } + } + + /* Fall back on the input's wrap/prepare */ + Py_XINCREF(input_method); + return input_method; +} + +/* + * This function analyzes the input arguments + * and determines an appropriate __array_prepare__ function to call + * for the outputs. 
+ * + * If an output argument is provided, then it is prepped + * with its own __array_prepare__ not with the one determined by + * the input arguments. + * + * if the provided output argument is already an ndarray, + * the prepping function is None (which means no prepping will + * be done --- not even PyArray_Return). + * + * A NULL is placed in output_prep for outputs that + * should just have PyArray_Return called. + */ +static void +_find_array_prepare(PyObject *args, PyObject *kwds, + PyObject **output_prep, int nin, int nout) +{ + Py_ssize_t nargs; + int i; /* - * Here prep is the prepping function determined from the - * input arrays (could be NULL). - * + * Determine the prepping function given by the input arrays + * (could be NULL). + */ + PyObject *prep = _find_array_method(args, nin, npy_um_str_array_prepare); + /* * For all the output arrays decide what to do. * * 1) Use the prep function determined from the input arrays @@ -235,11 +261,10 @@ _find_array_prepare(PyObject *args, PyObject *kwds, * exact ndarray so that no PyArray_Return is * done in that case. 
*/ + nargs = PyTuple_GET_SIZE(args); for (i = 0; i < nout; i++) { int j = nin + i; - int incref = 1; - output_prep[i] = prep; - obj = NULL; + PyObject *obj = NULL; if (j < nargs) { obj = PyTuple_GET_ITEM(args, j); /* Output argument one may also be in a keyword argument */ @@ -252,27 +277,13 @@ _find_array_prepare(PyObject *args, PyObject *kwds, obj = PyDict_GetItem(kwds, npy_um_str_out); } - if (obj != Py_None && obj != NULL) { - if (PyArray_CheckExact(obj)) { - /* None signals to not call any wrapping */ - output_prep[i] = Py_None; - } - else { - PyObject *oprep = PyObject_GetAttr(obj, - npy_um_str_array_prepare); - incref = 0; - if (!(oprep) || !(PyCallable_Check(oprep))) { - Py_XDECREF(oprep); - oprep = prep; - incref = 1; - PyErr_Clear(); - } - output_prep[i] = oprep; - } + if (obj == NULL) { + Py_XINCREF(prep); + output_prep[i] = prep; } - - if (incref) { - Py_XINCREF(output_prep[i]); + else { + output_prep[i] = _get_output_array_method( + obj, npy_um_str_array_prepare, prep); } } Py_XDECREF(prep); @@ -1177,7 +1188,7 @@ iterator_loop(PyUFuncObject *ufunc, PyUFuncGenericFunction innerloop, void *innerloopdata) { - npy_intp i, nin = ufunc->nin, nout = ufunc->nout; + npy_intp i, iop, nin = ufunc->nin, nout = ufunc->nout; npy_intp nop = nin + nout; npy_uint32 op_flags[NPY_MAXARGS]; NpyIter *iter; @@ -1263,6 +1274,12 @@ iterator_loop(PyUFuncObject *ufunc, /* Call the __array_prepare__ functions for the new array */ if (prepare_ufunc_output(ufunc, &op[nin+i], arr_prep[i], arr_prep_args, i) < 0) { + for(iop = 0; iop < nin+i; ++iop) { + if (op_it[iop] != op[iop]) { + /* ignore errors */ + PyArray_ResolveWritebackIfCopy(op_it[iop]); + } + } NpyIter_Deallocate(iter); return -1; } @@ -1315,7 +1332,11 @@ iterator_loop(PyUFuncObject *ufunc, NPY_END_THREADS; } - + for(iop = 0; iop < nop; ++iop) { + if (op_it[iop] != op[iop]) { + PyArray_ResolveWritebackIfCopy(op_it[iop]); + } + } NpyIter_Deallocate(iter); return 0; } @@ -1502,7 +1523,7 @@ 
execute_fancy_ufunc_loop(PyUFuncObject *ufunc, PyObject **arr_prep, PyObject *arr_prep_args) { - int i, nin = ufunc->nin, nout = ufunc->nout; + int retval, i, nin = ufunc->nin, nout = ufunc->nout; int nop = nin + nout; npy_uint32 op_flags[NPY_MAXARGS]; NpyIter *iter; @@ -1688,8 +1709,16 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc, NPY_AUXDATA_FREE(innerloopdata); } + retval = 0; + nop = NpyIter_GetNOp(iter); + for(i=0; i< nop; ++i) { + if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0) { + retval = -1; + } + } + NpyIter_Deallocate(iter); - return 0; + return retval; } static PyObject * @@ -1733,7 +1762,7 @@ make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds) /* * Validate the core dimensions of all the operands, and collect all of * the labelled core dimensions into 'core_dim_sizes'. - * + * * Returns 0 on success, and -1 on failure * * The behavior has been changed in NumPy 1.10.0, and the following @@ -1847,6 +1876,42 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op, return 0; } +/* + * Returns a new reference + * TODO: store a reference in the ufunc object itself, rather than + * constructing one each time + */ +static PyObject * +_get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) { + switch(ufunc->identity) { + case PyUFunc_One: + *reorderable = 1; + return PyInt_FromLong(1); + + case PyUFunc_Zero: + *reorderable = 1; + return PyInt_FromLong(0); + + case PyUFunc_MinusOne: + *reorderable = 1; + return PyInt_FromLong(-1); + + case PyUFunc_ReorderableNone: + *reorderable = 1; + Py_RETURN_NONE; + + case PyUFunc_None: + *reorderable = 0; + Py_RETURN_NONE; + + default: + PyErr_Format(PyExc_ValueError, + "ufunc %s has an invalid identity", ufunc_get_name_cstr(ufunc)); + return NULL; + } +} + + static int PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, @@ -2057,7 +2122,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, * Get the appropriate __array_prepare__ function to 
call * for each output */ - _find_array_prepare(args, kwds, arr_prep, nin, nout, 0); + _find_array_prepare(args, kwds, arr_prep, nin, nout); /* Set up arr_prep_args if a prep function was needed */ for (i = 0; i < nout; ++i) { @@ -2249,34 +2314,27 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, * product of two zero-length arrays will be a scalar, * which has size one. */ + npy_bool reorderable; + PyObject *identity = _get_identity(ufunc, &reorderable); + if (identity == NULL) { + retval = -1; + goto fail; + } + for (i = nin; i < nop; ++i) { if (PyArray_SIZE(op[i]) != 0) { - switch (ufunc->identity) { - case PyUFunc_Zero: - assign_reduce_identity_zero(op[i], NULL); - break; - case PyUFunc_One: - assign_reduce_identity_one(op[i], NULL); - break; - case PyUFunc_MinusOne: - assign_reduce_identity_minusone(op[i], NULL); - break; - case PyUFunc_None: - case PyUFunc_ReorderableNone: - PyErr_Format(PyExc_ValueError, - "ufunc %s ", - ufunc_name); - retval = -1; - goto fail; - default: - PyErr_Format(PyExc_ValueError, - "ufunc %s has an invalid identity for reduction", - ufunc_name); - retval = -1; - goto fail; + if (identity == Py_None) { + PyErr_Format(PyExc_ValueError, + "ufunc %s ", + ufunc_name); + Py_DECREF(identity); + retval = -1; + goto fail; } + PyArray_FillWithScalar(op[i], identity); } } + Py_DECREF(identity); } /* Check whether any errors occurred during the loop */ @@ -2286,6 +2344,11 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, goto fail; } + /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */ + for(i=nin; i< nop; ++i) + if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0) + goto fail; + PyArray_free(inner_strides); NpyIter_Deallocate(iter); /* The caller takes ownership of all the references in op */ @@ -2444,7 +2507,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc, * Get the appropriate __array_prepare__ function to call * for each output */ - _find_array_prepare(args, kwds, arr_prep, nin, nout, 0); + 
_find_array_prepare(args, kwds, arr_prep, nin, nout); /* Set up arr_prep_args if a prep function was needed */ for (i = 0; i < nout; ++i) { @@ -2666,31 +2729,6 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr, } static int -assign_reduce_identity_zero(PyArrayObject *result, void *NPY_UNUSED(data)) -{ - return PyArray_FillWithScalar(result, PyArrayScalar_False); -} - -static int -assign_reduce_identity_one(PyArrayObject *result, void *NPY_UNUSED(data)) -{ - return PyArray_FillWithScalar(result, PyArrayScalar_True); -} - -static int -assign_reduce_identity_minusone(PyArrayObject *result, void *NPY_UNUSED(data)) -{ - static PyObject *MinusOne = NULL; - - if (MinusOne == NULL) { - if ((MinusOne = PyInt_FromLong(-1)) == NULL) { - return -1; - } - } - return PyArray_FillWithScalar(result, MinusOne); -} - -static int reduce_loop(NpyIter *iter, char **dataptrs, npy_intp *strides, npy_intp *countptr, NpyIter_IterNextFunc *iternext, int needs_api, npy_intp skip_first_count, void *data) @@ -2795,11 +2833,12 @@ static PyArrayObject * PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, int naxes, int *axes, PyArray_Descr *odtype, int keepdims) { - int iaxes, reorderable, ndim; + int iaxes, ndim; + npy_bool reorderable; npy_bool axis_flags[NPY_MAXDIMS]; PyArray_Descr *dtype; PyArrayObject *result; - PyArray_AssignReduceIdentityFunc *assign_identity = NULL; + PyObject *identity = NULL; const char *ufunc_name = ufunc_get_name_cstr(ufunc); /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; @@ -2820,60 +2859,28 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, axis_flags[axis] = 1; } - switch (ufunc->identity) { - case PyUFunc_Zero: - assign_identity = &assign_reduce_identity_zero; - reorderable = 1; - /* - * The identity for a dynamic dtype like - * object arrays can't be used in general - */ - if (PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) { - assign_identity = NULL; - } - 
break; - case PyUFunc_One: - assign_identity = &assign_reduce_identity_one; - reorderable = 1; - /* - * The identity for a dynamic dtype like - * object arrays can't be used in general - */ - if (PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) { - assign_identity = NULL; - } - break; - case PyUFunc_MinusOne: - assign_identity = &assign_reduce_identity_minusone; - reorderable = 1; - /* - * The identity for a dynamic dtype like - * object arrays can't be used in general - */ - if (PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) { - assign_identity = NULL; - } - break; - - case PyUFunc_None: - reorderable = 0; - break; - case PyUFunc_ReorderableNone: - reorderable = 1; - break; - default: - PyErr_Format(PyExc_ValueError, - "ufunc %s has an invalid identity for reduction", - ufunc_name); - return NULL; + if (_get_bufsize_errmask(NULL, "reduce", &buffersize, &errormask) < 0) { + return NULL; } - if (_get_bufsize_errmask(NULL, "reduce", &buffersize, &errormask) < 0) { + /* Get the identity */ + identity = _get_identity(ufunc, &reorderable); + if (identity == NULL) { return NULL; } + /* + * The identity for a dynamic dtype like + * object arrays can't be used in general + */ + if (identity != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) { + Py_DECREF(identity); + identity = Py_None; + Py_INCREF(identity); + } /* Get the reduction dtype */ if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) { + Py_DECREF(identity); return NULL; } @@ -2881,11 +2888,12 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NPY_UNSAFE_CASTING, axis_flags, reorderable, keepdims, 0, - assign_identity, + identity, reduce_loop, ufunc, buffersize, ufunc_name, errormask); Py_DECREF(dtype); + Py_DECREF(identity); return result; } @@ -3218,6 +3226,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, } finish: + /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */ + if 
(PyArray_ResolveWritebackIfCopy(op[0]) < 0) + goto fail; Py_XDECREF(op_dtypes[0]); NpyIter_Deallocate(iter); NpyIter_Deallocate(iter_inner); @@ -3600,6 +3611,9 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, } finish: + if (op[0] && PyArray_ResolveWritebackIfCopy(op[0]) < 0) { + goto fail; + } Py_XDECREF(op_dtypes[0]); NpyIter_Deallocate(iter); @@ -3627,7 +3641,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, int i, naxes=0, ndim; int axes[NPY_MAXDIMS]; PyObject *axes_in = NULL; - PyArrayObject *mp, *ret = NULL; + PyArrayObject *mp = NULL, *ret = NULL; PyObject *op, *res = NULL; PyObject *obj_ind, *context; PyArrayObject *indices = NULL; @@ -3678,24 +3692,22 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, PyDict_SetItem(kwds, npy_um_str_out, out_obj); } } - + if (operation == UFUNC_REDUCEAT) { PyArray_Descr *indtype; indtype = PyArray_DescrFromType(NPY_INTP); if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OO&O&:reduceat", reduceat_kwlist, - &op, - &obj_ind, - &axes_in, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(otype); - return NULL; + &op, + &obj_ind, + &axes_in, + PyArray_DescrConverter2, &otype, + PyArray_OutputConverter, &out)) { + goto fail; } indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, 1, 1, NPY_ARRAY_CARRAY, NULL); if (indices == NULL) { - Py_XDECREF(otype); - return NULL; + goto fail; } } else if (operation == UFUNC_ACCUMULATE) { @@ -3705,8 +3717,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, &axes_in, PyArray_DescrConverter2, &otype, PyArray_OutputConverter, &out)) { - Py_XDECREF(otype); - return NULL; + goto fail; } } else { @@ -3717,8 +3728,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, PyArray_DescrConverter2, &otype, PyArray_OutputConverter, &out, &keepdims)) { - Py_XDECREF(otype); - return NULL; + goto fail; } } /* Ensure input is an array */ @@ -3731,7 +3741,7 @@ 
PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); Py_XDECREF(context); if (mp == NULL) { - return NULL; + goto fail; } ndim = PyArray_NDIM(mp); @@ -3742,9 +3752,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, PyErr_Format(PyExc_TypeError, "cannot perform %s with flexible type", _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } /* Convert the 'axis' parameter into a list of axes */ @@ -3764,22 +3772,16 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, if (naxes < 0 || naxes > NPY_MAXDIMS) { PyErr_SetString(PyExc_ValueError, "too many values for 'axis'"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } for (i = 0; i < naxes; ++i) { PyObject *tmp = PyTuple_GET_ITEM(axes_in, i); int axis = PyArray_PyIntAsInt(tmp); if (error_converting(axis)) { - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } if (check_and_adjust_axis(&axis, ndim) < 0) { - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } axes[i] = (int)axis; } @@ -3789,16 +3791,14 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, int axis = PyArray_PyIntAsInt(axes_in); /* TODO: PyNumber_Index would be good to use here */ if (error_converting(axis)) { - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } /* Special case letting axis={0 or -1} slip through for scalars */ if (ndim == 0 && (axis == 0 || axis == -1)) { axis = 0; } else if (check_and_adjust_axis(&axis, ndim) < 0) { - return NULL; + goto fail; } axes[0] = (int)axis; naxes = 1; @@ -3818,9 +3818,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, (naxes == 0 || (naxes == 1 && axes[0] == 0)))) { PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } } @@ -3866,9 +3864,7 @@ PyUFunc_GenericReduction(PyUFuncObject 
*ufunc, PyObject *args, if (naxes != 1) { PyErr_SetString(PyExc_ValueError, "accumulate does not allow multiple axes"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc, mp, out, axes[0], otype->type_num); @@ -3877,9 +3873,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, if (naxes != 1) { PyErr_SetString(PyExc_ValueError, "reduceat does not allow multiple axes"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc, mp, indices, out, axes[0], otype->type_num); @@ -3912,38 +3906,11 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, } } return PyArray_Return(ret); -} - -/* - * Returns an incref'ed pointer to the proper wrapping object for a - * ufunc output argument, given the output argument 'out', and the - * input's wrapping function, 'wrap'. - */ -static PyObject* -_get_out_wrap(PyObject *out, PyObject *wrap) { - PyObject *owrap; - if (out == Py_None) { - /* Iterator allocated outputs get the input's wrapping */ - Py_XINCREF(wrap); - return wrap; - } - if (PyArray_CheckExact(out)) { - /* None signals to not call any wrapping */ - Py_RETURN_NONE; - } - /* - * For array subclasses use their __array_wrap__ method, or the - * input's wrapping if not available - */ - owrap = PyObject_GetAttr(out, npy_um_str_array_wrap); - if (owrap == NULL || !PyCallable_Check(owrap)) { - Py_XDECREF(owrap); - owrap = wrap; - Py_XINCREF(wrap); - PyErr_Clear(); - } - return owrap; +fail: + Py_XDECREF(otype); + Py_XDECREF(mp); + return NULL; } /* @@ -3968,9 +3935,8 @@ _find_array_wrap(PyObject *args, PyObject *kwds, { Py_ssize_t nargs; int i, idx_offset, start_idx; - int np = 0; - PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS]; - PyObject *obj, *wrap = NULL; + PyObject *obj; + PyObject *wrap = NULL; /* * If a 'subok' parameter is passed and isn't True, don't wrap but put None @@ -3984,53 +3950,13 @@ 
_find_array_wrap(PyObject *args, PyObject *kwds, } } - - for (i = 0; i < nin; i++) { - obj = PyTuple_GET_ITEM(args, i); - if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) { - continue; - } - wrap = PyObject_GetAttr(obj, npy_um_str_array_wrap); - if (wrap) { - if (PyCallable_Check(wrap)) { - with_wrap[np] = obj; - wraps[np] = wrap; - ++np; - } - else { - Py_DECREF(wrap); - wrap = NULL; - } - } - else { - PyErr_Clear(); - } - } - if (np > 0) { - /* If we have some wraps defined, find the one of highest priority */ - wrap = wraps[0]; - if (np > 1) { - double maxpriority = PyArray_GetPriority(with_wrap[0], - NPY_PRIORITY); - for (i = 1; i < np; ++i) { - double priority = PyArray_GetPriority(with_wrap[i], - NPY_PRIORITY); - if (priority > maxpriority) { - maxpriority = priority; - Py_DECREF(wrap); - wrap = wraps[i]; - } - else { - Py_DECREF(wraps[i]); - } - } - } - } + /* + * Determine the wrapping function given by the input arrays + * (could be NULL). + */ + wrap = _find_array_method(args, nin, npy_um_str_array_wrap); /* - * Here wrap is the wrapping function determined from the - * input arrays (could be NULL). - * * For all the output arrays decide what to do. 
* * 1) Use the wrap function determined from the input arrays @@ -4063,7 +3989,8 @@ handle_out: } else { /* If the kwarg is not a tuple then it is an array (or None) */ - output_wrap[0] = _get_out_wrap(obj, wrap); + output_wrap[0] = _get_output_array_method( + obj, npy_um_str_array_wrap, wrap); start_idx = 1; nargs = 1; } @@ -4074,8 +4001,8 @@ handle_out: int j = idx_offset + i; if (j < nargs) { - output_wrap[i] = _get_out_wrap(PyTuple_GET_ITEM(obj, j), - wrap); + output_wrap[i] = _get_output_array_method( + PyTuple_GET_ITEM(obj, j), npy_um_str_array_wrap, wrap); } else { output_wrap[i] = wrap; @@ -4100,29 +4027,27 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) PyObject *override = NULL; int errval; - /* - * Initialize all array objects to NULL to make cleanup easier - * if something goes wrong. - */ - for (i = 0; i < ufunc->nargs; i++) { - mps[i] = NULL; - } - errval = PyUFunc_CheckOverride(ufunc, "__call__", args, kwds, &override); if (errval) { return NULL; } else if (override) { - for (i = 0; i < ufunc->nargs; i++) { - PyArray_XDECREF_ERR(mps[i]); - } return override; } + /* + * Initialize all array objects to NULL to make cleanup easier + * if something goes wrong. 
+ */ + for (i = 0; i < ufunc->nargs; i++) { + mps[i] = NULL; + } + errval = PyUFunc_GenericFunction(ufunc, args, kwds, mps); if (errval < 0) { for (i = 0; i < ufunc->nargs; i++) { - PyArray_XDECREF_ERR(mps[i]); + PyArray_DiscardWritebackIfCopy(mps[i]); + Py_XDECREF(mps[i]); } if (errval == -1) { return NULL; @@ -5215,6 +5140,9 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) NpyIter_Deallocate(iter_buffer); + if (op1_array != (PyArrayObject*)op1) { + PyArray_ResolveWritebackIfCopy(op1_array); + } Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); @@ -5231,6 +5159,9 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) fail: + if (op1_array != (PyArrayObject*)op1) { + PyArray_ResolveWritebackIfCopy(op1_array); + } Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); @@ -5377,15 +5308,8 @@ ufunc_get_name(PyUFuncObject *ufunc) static PyObject * ufunc_get_identity(PyUFuncObject *ufunc) { - switch(ufunc->identity) { - case PyUFunc_One: - return PyInt_FromLong(1); - case PyUFunc_Zero: - return PyInt_FromLong(0); - case PyUFunc_MinusOne: - return PyInt_FromLong(-1); - } - Py_RETURN_NONE; + npy_bool reorderable; + return _get_identity(ufunc, &reorderable); } static PyObject * diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 1a6cee030..03bf5bfd8 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -359,12 +359,10 @@ PyMODINIT_FUNC initumath(void) goto err; } - s = PyString_FromString("0.4.0"); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - /* Load the ufunc operators into the array module's namespace */ - InitOperators(d); + if (InitOperators(d) < 0) { + goto err; + } PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI)); Py_DECREF(s); diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 1c935dcbc..88aaa3403 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -1,12 +1,13 @@ # 
-*- coding: utf-8 -*- from __future__ import division, absolute_import, print_function -import sys +import sys, gc import numpy as np from numpy.testing import ( - run_module_suite, assert_, assert_equal + run_module_suite, assert_, assert_equal, assert_raises, assert_warns ) +import textwrap class TestArrayRepr(object): def test_nan_inf(self): @@ -27,13 +28,61 @@ class TestArrayRepr(object): ' [3, 4]])') # two dimensional with flexible dtype - xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub) + xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub) assert_equal(repr(xstruct), "sub([[(1,), (1,)],\n" - " [(1,), (1,)]],\n" - " dtype=[('a', '<i4')])" + " [(1,), (1,)]], dtype=[('a', '<i4')])" ) + def test_0d_object_subclass(self): + # make sure that subclasses which return 0ds instead + # of scalars don't cause infinite recursion in str + class sub(np.ndarray): + def __new__(cls, inp): + obj = np.asarray(inp).view(cls) + return obj + + def __getitem__(self, ind): + ret = super(sub, self).__getitem__(ind) + return sub(ret) + + x = sub(1) + assert_equal(repr(x), 'sub(1)') + assert_equal(str(x), '1') + + x = sub([1, 1]) + assert_equal(repr(x), 'sub([1, 1])') + assert_equal(str(x), '[1 1]') + + # check it works properly with object arrays too + x = sub(None) + assert_equal(repr(x), 'sub(None, dtype=object)') + assert_equal(str(x), 'None') + + # plus recursive object arrays (even depth > 1) + y = sub(None) + x[()] = y + y[()] = x + assert_equal(repr(x), + 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)') + assert_equal(str(x), '...') + + # nested 0d-subclass-object + x = sub(None) + x[()] = sub(None) + assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') + assert_equal(str(x), 'None') + + # test that object + subclass is OK: + x = sub([None, None]) + assert_equal(repr(x), 'sub([None, None], dtype=object)') + assert_equal(str(x), '[None None]') + + x = sub([None, sub([None, None])]) + assert_equal(repr(x), + 'sub([None, sub([None, None], 
dtype=object)], dtype=object)') + assert_equal(str(x), '[None sub([None, None], dtype=object)]') + def test_self_containing(self): arr0d = np.array(None) arr0d[()] = arr0d @@ -64,6 +113,12 @@ class TestArrayRepr(object): # gh-9345 repr(np.void(b'test')) # RecursionError ? + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + class TestComplexArray(object): def test_str(self): @@ -117,7 +172,8 @@ class TestArray2String(object): """Basic test of array2string.""" a = np.arange(3) assert_(np.array2string(a) == '[0 1 2]') - assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') def test_format_function(self): """Test custom format function for each element in array.""" @@ -157,20 +213,53 @@ class TestArray2String(object): assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == '[abcabc defdef]') + # check for backcompat that using FloatFormat works and emits warning + with assert_warns(DeprecationWarning): + fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False) + assert_equal(np.array2string(x, formatter={'float_kind': fmt}), + '[0. 1. 
2.]') + def test_structure_format(self): dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) assert_equal(np.array2string(x), "[('Sarah', [8., 7.]) ('John', [6., 7.])]") - # for issue #5692 - A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) A[5:].fill(np.datetime64('NaT')) - assert_equal(np.array2string(A), - "[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " + - "('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " + - "('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " + - "('NaT',) ('NaT',) ('NaT',)]") + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) # See #8160 struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) @@ -186,6 +275,98 @@ class TestArray2String(object): (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + 27, 91, 51, 49,109, 82,101,100], 
dtype='u1').view('V8') + assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), vars(np)), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + assert_equal(repr(A), reprA) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + assert_equal(repr(A), reprA) + + def test_linewidth(self): + a = np.full(6, 1) + + def make_str(a, width, **kw): + return np.array2string(a, separator="", max_line_width=width, **kw) + + assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' + ' 11]') + + assert_equal(make_str(a, 8), '[111111]') + assert_equal(make_str(a, 7), '[11111\n' + ' 1]') + assert_equal(make_str(a, 5), '[111\n' + ' 111]') + + b = a[None,None,:] + + assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' + ' 1]]]') + + assert_equal(make_str(b, 12), '[[[111111]]]') + assert_equal(make_str(b, 9), '[[[111\n' + ' 111]]]') + assert_equal(make_str(b, 8), '[[[11\n' + ' 11\n' + ' 11]]]') 
+ + def test_wide_element(self): + a = np.array(['xxxxx']) + assert_equal( + np.array2string(a, max_line_width=5), + "['xxxxx']" + ) + assert_equal( + np.array2string(a, max_line_width=5, legacy='1.13'), + "[ 'xxxxx']" + ) + + def test_refcount(self): + # make sure we do not hold references to the array due to a recursive + # closure (gh-10620) + gc.disable() + a = np.arange(2) + r1 = sys.getrefcount(a) + np.array2string(a) + np.array2string(a) + r2 = sys.getrefcount(a) + gc.collect() + gc.enable() + assert_(r1 == r2) class TestPrintOptions(object): """Test getting and setting global print options.""" @@ -241,12 +422,39 @@ class TestPrintOptions(object): assert_equal(repr(x), "array([0., 1., 2.])") def test_0d_arrays(self): + unicode = type(u'') + + assert_equal(unicode(np.array(u'café', '<U4')), u'café') + + if sys.version_info[0] >= 3: + assert_equal(repr(np.array('café', '<U4')), + "array('café', dtype='<U4')") + else: + assert_equal(repr(np.array(u'café', '<U4')), + "array(u'caf\\xe9', dtype='<U4')") + assert_equal(str(np.array('test', np.str_)), 'test') + + a = np.zeros(1, dtype=[('a', '<i4', (3,))]) + assert_equal(str(a[0]), '([0, 0, 0],)') + assert_equal(repr(np.datetime64('2005-02-25')[...]), "array('2005-02-25', dtype='datetime64[D]')") + assert_equal(repr(np.timedelta64('10', 'Y')[...]), + "array(10, dtype='timedelta64[Y]')") + + # repr of 0d arrays is affected by printoptions x = np.array(1) np.set_printoptions(formatter={'all':lambda x: "test"}) assert_equal(repr(x), "array(test)") + # str is unaffected + assert_equal(str(x), "1") + + # check `style` arg raises + assert_warns(DeprecationWarning, np.array2string, + np.array(1.), style=repr) + # but not in legacy mode + np.array2string(np.array(1.), style=repr, legacy='1.13') def test_float_spacing(self): x = np.array([1., 2., 3.]) @@ -274,44 +482,66 @@ class TestPrintOptions(object): def test_bool_spacing(self): assert_equal(repr(np.array([True, True])), - 'array([ True, True], dtype=bool)') + 'array([ 
True, True])') assert_equal(repr(np.array([True, False])), - 'array([ True, False], dtype=bool)') + 'array([ True, False])') assert_equal(repr(np.array([True])), - 'array([ True], dtype=bool)') + 'array([ True])') assert_equal(repr(np.array(True)), - 'array(True, dtype=bool)') + 'array(True)') assert_equal(repr(np.array(False)), - 'array(False, dtype=bool)') + 'array(False)') def test_sign_spacing(self): a = np.arange(4.) b = np.array([1.234e9]) + c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16') assert_equal(repr(a), 'array([0., 1., 2., 3.])') assert_equal(repr(np.array(1.)), 'array(1.)') assert_equal(repr(b), 'array([1.234e+09])') + assert_equal(repr(np.array([0.])), 'array([0.])') + assert_equal(repr(c), + "array([1. +1.j , 1.12345679+1.12345679j])") + assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])') np.set_printoptions(sign=' ') assert_equal(repr(a), 'array([ 0., 1., 2., 3.])') assert_equal(repr(np.array(1.)), 'array( 1.)') assert_equal(repr(b), 'array([ 1.234e+09])') + assert_equal(repr(c), + "array([ 1. +1.j , 1.12345679+1.12345679j])") + assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])') np.set_printoptions(sign='+') assert_equal(repr(a), 'array([+0., +1., +2., +3.])') assert_equal(repr(np.array(1.)), 'array(+1.)') assert_equal(repr(b), 'array([+1.234e+09])') + assert_equal(repr(c), + "array([+1. +1.j , +1.12345679+1.12345679j])") - np.set_printoptions(sign='legacy') + np.set_printoptions(legacy='1.13') assert_equal(repr(a), 'array([ 0., 1., 2., 3.])') - assert_equal(repr(np.array(1.)), 'array(1.)') assert_equal(repr(b), 'array([ 1.23400000e+09])') assert_equal(repr(-b), 'array([ -1.23400000e+09])') + assert_equal(repr(np.array(1.)), 'array(1.0)') + assert_equal(repr(np.array([0.])), 'array([ 0.])') + assert_equal(repr(c), + "array([ 1.00000000+1.j , 1.12345679+1.12345679j])") + # gh-10383 + assert_equal(str(np.array([-1., 10])), "[ -1. 
10.]") + + assert_raises(TypeError, np.set_printoptions, wrongarg=True) + + def test_float_overflow_nowarn(self): + # make sure internal computations in FloatingFormat don't + # warn about overflow + repr(np.array([1e4, 0.1], dtype='f2')) def test_sign_spacing_structured(self): - a = np.ones(2, dtype='f,f') - assert_equal(repr(a), "array([(1., 1.), (1., 1.)],\n" - " dtype=[('f0', '<f4'), ('f1', '<f4')])") + a = np.ones(2, dtype='<f,<f') + assert_equal(repr(a), + "array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])") assert_equal(repr(a[0]), "(1., 1.)") def test_floatmode(self): @@ -323,6 +553,7 @@ class TestPrintOptions(object): 0.0862072768214508, 0.39112753029631175], dtype=np.float64) z = np.arange(6, dtype=np.float16)/10 + c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16') # also make sure 1e23 is right (is between two fp numbers) w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64) @@ -348,6 +579,8 @@ class TestPrintOptions(object): " 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n" " 1.e+24])") assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])") + assert_equal(repr(c), + "array([1. +1.j , 1.123456789+1.123456789j])") # maxprec mode, precision=8 np.set_printoptions(floatmode='maxprec', precision=8) @@ -362,6 +595,8 @@ class TestPrintOptions(object): assert_equal(repr(w[::5]), "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])") assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])") + assert_equal(repr(c), + "array([1. 
+1.j , 1.12345679+1.12345679j])") # fixed mode, precision=4 np.set_printoptions(floatmode='fixed', precision=4) @@ -375,6 +610,9 @@ class TestPrintOptions(object): assert_equal(repr(w[::5]), "array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])") assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])") + assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])") + assert_equal(repr(c), + "array([1.0000+1.0000j, 1.1235+1.1235j])") # for larger precision, representation error becomes more apparent: np.set_printoptions(floatmode='fixed', precision=8) assert_equal(repr(z), @@ -394,6 +632,168 @@ class TestPrintOptions(object): assert_equal(repr(w[::5]), "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])") assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])") + assert_equal(repr(c), + "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])") + + def test_legacy_mode_scalars(self): + # in legacy mode, str of floats get truncated, and complex scalars + # use * for non-finite imaginary part + np.set_printoptions(legacy='1.13') + assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912') + assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)') + + np.set_printoptions(legacy=False) + assert_equal(str(np.float64(1.123456789123456789)), + '1.1234567891234568') + assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)') + + def test_legacy_stray_comma(self): + np.set_printoptions(legacy='1.13') + assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]') + + np.set_printoptions(legacy=False) + assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 
9997 9998 9999]') + + def test_dtype_linewidth_wrapping(self): + np.set_printoptions(linewidth=75) + assert_equal(repr(np.arange(10,20., dtype='f4')), + "array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)") + assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\ + array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.], + dtype=float32)""")) + + styp = '<U4' if sys.version_info[0] >= 3 else '|S4' + assert_equal(repr(np.ones(3, dtype=styp)), + "array(['1', '1', '1'], dtype='{}')".format(styp)) + assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ + array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'], + dtype='{}')""".format(styp))) + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, 
..., 26]]])""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]])""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + np.set_printoptions(legacy='1.13') + + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[ 0.]], + + ..., + [[ 0.]]], + + + ..., + [[[ 0.]], + + ..., + [[ 0.]]]])""") + ) + def test_unicode_object_array(): import sys @@ -405,5 +805,37 @@ def test_unicode_object_array(): assert_equal(repr(x), expected) +class TestContextManager(object): + def test_ctx_mgr(self): + # test that context manager actuall works + with np.printoptions(precision=2): + s = str(np.array([2.0]) / 3) + assert_equal(s, '[0.67]') + + def test_ctx_mgr_restores(self): + # test that print options are actually restrored + opts = np.get_printoptions() + with np.printoptions(precision=opts['precision'] - 1, + linewidth=opts['linewidth'] - 4): + pass + assert_equal(np.get_printoptions(), opts) + + def test_ctx_mgr_exceptions(self): + # test that print options are restored even if an exception is raised + opts = np.get_printoptions() + try: + with np.printoptions(precision=2, linewidth=11): + raise ValueError + except ValueError: + pass + assert_equal(np.get_printoptions(), opts) + + def test_ctx_mgr_as_smth(self): + opts = {"precision": 2} + with np.printoptions(**opts) as ctx: + saved_opts = ctx.copy() + assert_equal({k: saved_opts[k] for k in opts}, opts) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index dc84a039c..638994aee 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -565,7 +565,7 @@ class TestDateTime(object): # Check that one NaT doesn't 
corrupt subsequent entries a = np.array(['2010', 'NaT', '2030']).astype('M') - assert_equal(str(a), "['2010' 'NaT' '2030']") + assert_equal(str(a), "['2010' 'NaT' '2030']") def test_timedelta_array_str(self): a = np.array([-1, 0, 100], dtype='m') diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 1c1851fc7..fe0c7cc5f 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -434,6 +434,18 @@ class TestNPY_CHAR(_DeprecationTestCase): assert_(npy_char_deprecation() == 'S1') +class Test_UPDATEIFCOPY(_DeprecationTestCase): + """ + v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use + WRITEBACKIFCOPY instead + """ + def test_npy_updateifcopy_deprecation(self): + from numpy.core.multiarray_tests import npy_updateifcopy_deprecation + arr = np.arange(9).reshape(3, 3) + v = arr.T + self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) + + class TestDatetimeEvent(_DeprecationTestCase): # 2017-08-11, 1.14.0 def test_3_tuple(self): diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index b48983e2e..2f997b4f7 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -40,7 +40,7 @@ class TestBuiltin(object): assert_(dt.byteorder != dt2.byteorder, "bogus test") assert_dtype_equal(dt, dt2) else: - self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test") + assert_(dt.byteorder != dt3.byteorder, "bogus test") assert_dtype_equal(dt, dt3) def test_equivalent_dtype_hashing(self): @@ -719,5 +719,10 @@ def test_dtypes_are_true(): assert bool(np.dtype([('a', 'i8'), ('b', 'f4')])) +def test_invalid_dtype_string(): + # test for gh-10440 + assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]') + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index da83bb8c4..9bd85fdb9 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ 
-481,6 +481,25 @@ class TestEinSum(object): r = np.arange(4).reshape(2, 2) + 7 assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + # singleton dimensions broadcast (gh-10343) + p = np.ones((10,2)) + q = np.ones((1,2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + p = np.ones((1, 5)) + q = np.ones((5, 5)) + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 5)) + def test_einsum_sums_int8(self): self.check_einsum_sums('i1') @@ -538,6 +557,13 @@ class TestEinSum(object): assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize=u'greedy'), 20) + # The iterator had an issue with buffering this reduction a = np.ones((5, 12, 4, 2, 3), np.int64) b = np.ones((5, 12, 11), np.int64) diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py index 361246c7f..455f5257c 100644 --- a/numpy/core/tests/test_getlimits.py +++ b/numpy/core/tests/test_getlimits.py @@ -80,7 +80,7 @@ class TestRepr(object): assert_equal(repr(np.iinfo(np.int16)), expected) def test_finfo_repr(self): - expected = "finfo(resolution=0.000001, min=-3.4028235e+38," + \ + expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ " max=3.4028235e+38, dtype=float32)" 
assert_equal(repr(np.finfo(np.float32)), expected) diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index 7cfb81da7..df9eca627 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -10,7 +10,7 @@ from numpy.core.multiarray_tests import array_indexing from itertools import product from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_raises, - assert_array_equal, assert_warns, HAS_REFCOUNT + assert_array_equal, assert_warns, dec, HAS_REFCOUNT, suppress_warnings, ) @@ -622,6 +622,55 @@ class TestSubclasses(object): assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) + @dec.skipif(not HAS_REFCOUNT) + def test_slice_decref_getsetslice(self): + # See gh-10066, a temporary slice object should be discarted. + # This test is only really interesting on Python 2 since + # it goes through `__set/getslice__` here and can probably be + # removed. Use 0:7 to make sure it is never None:7. 
+ class KeepIndexObject(np.ndarray): + def __getitem__(self, indx): + self.indx = indx + if indx == slice(0, 7): + raise ValueError + + def __setitem__(self, indx, val): + self.indx = indx + if indx == slice(0, 4): + raise ValueError + + k = np.array([1]).view(KeepIndexObject) + k[0:5] + assert_equal(k.indx, slice(0, 5)) + assert_equal(sys.getrefcount(k.indx), 2) + try: + k[0:7] + raise AssertionError + except ValueError: + # The exception holds a reference to the slice so clear on Py2 + if hasattr(sys, 'exc_clear'): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + sys.exc_clear() + assert_equal(k.indx, slice(0, 7)) + assert_equal(sys.getrefcount(k.indx), 2) + + k[0:3] = 6 + assert_equal(k.indx, slice(0, 3)) + assert_equal(sys.getrefcount(k.indx), 2) + try: + k[0:4] = 2 + raise AssertionError + except ValueError: + # The exception holds a reference to the slice so clear on Py2 + if hasattr(sys, 'exc_clear'): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + sys.exc_clear() + assert_equal(k.indx, slice(0, 4)) + assert_equal(sys.getrefcount(k.indx), 2) + + class TestFancyIndexingCast(object): def test_boolean_index_cast_assign(self): # Setup the boolean index and float arrays. 
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py index a0a458ca5..642da426c 100644 --- a/numpy/core/tests/test_item_selection.py +++ b/numpy/core/tests/test_item_selection.py @@ -53,13 +53,13 @@ class TestTake(object): for mode in ('raise', 'clip', 'wrap'): a = np.array(objects) b = np.array([2, 2, 4, 5, 3, 5]) - a.take(b, out=a[:6]) + a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: assert_(all(sys.getrefcount(o) == 3 for o in objects)) # not contiguous, example: a = np.array(objects * 2)[::2] - a.take(b, out=a[:6]) + a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: assert_(all(sys.getrefcount(o) == 3 for o in objects)) diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index 53d56b5e7..9c17ed210 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -94,7 +94,7 @@ def test_overlapping_assignments(): srcidx = tuple([a[0] for a in ind]) dstidx = tuple([a[1] for a in ind]) - yield _check_assignment, srcidx, dstidx + _check_assignment(srcidx, dstidx) @dec.slow diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index a02075a1e..661561ab3 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -18,11 +18,11 @@ if sys.version_info[0] >= 3: else: import __builtin__ as builtins from decimal import Decimal - +from unittest import TestCase import numpy as np from numpy.compat import strchar, unicode -from .test_print import in_foreign_locale +from numpy.core.tests.test_print import in_foreign_locale from numpy.core.multiarray_tests import ( test_neighborhood_iterator, test_neighborhood_iterator_oob, test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, @@ -89,6 +89,7 @@ class TestFlags(object): def test_otherflags(self): assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) assert_equal(self.a.flags.farray, False) 
assert_equal(self.a.flags.behaved, True) assert_equal(self.a.flags.fnc, False) @@ -96,7 +97,13 @@ class TestFlags(object): assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.updateifcopy, False) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags.updateifcopy, False) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags['U'], False) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -451,6 +458,14 @@ class TestAssignment(object): arr = np.array([np.array(tinya)]) assert_equal(arr[0], tinya) + def test_cast_to_string(self): + # cast to str should do "str(scalar)", not "str(scalar.item())" + # Example: In python2, str(float) is truncated, so we want to avoid + # str(np.float64(...).item()) as this would incorrectly truncate. + a = np.zeros(1, dtype='S20') + a[:] = np.array(['1.12345678901234567890'], dtype='f8') + assert_equal(a[0], b"1.1234567890123457") + class TestDtypedescr(object): def test_construction(self): @@ -463,6 +478,16 @@ class TestDtypedescr(object): assert_(np.dtype('<i4') != np.dtype('>i4')) assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')])) + def test_structured_non_void(self): + fields = [('a', '<i2'), ('b', '<i2')] + dt_int = np.dtype(('i4', fields)) + assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])") + + # gh-9821 + arr_int = np.zeros(4, dt_int) + assert_equal(repr(arr_int), + "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))") + class TestZeroRank(object): def setup(self): @@ -1142,9 +1167,11 @@ class TestStructured(object): def test_multiindex_titles(self): a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')]) assert_raises(KeyError, lambda : a[['a','c']]) - assert_raises(KeyError, lambda : a[['b','b']]) + assert_raises(KeyError, 
lambda : a[['a','a']]) + assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated a[['b','c']] # no exception + class TestBool(object): def test_test_interning(self): a0 = np.bool_(0) @@ -1628,7 +1655,7 @@ class TestMethods(object): arr = np.array([0, datetime.now(), 1], dtype=object) for kind in ['q', 'm', 'h']: assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 + #gh-3879 class Raiser(object): def raises_anything(*args, **kwargs): raise TypeError("SOMETHING ERRORED") @@ -2618,6 +2645,10 @@ class TestMethods(object): assert_equal(a.diagonal(0), [0, 5, 10]) assert_equal(a.diagonal(1), [1, 6, 11]) assert_equal(a.diagonal(-1), [4, 9]) + assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5) + assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0) + assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5) + assert_raises(ValueError, a.diagonal, axis1=1, axis2=1) b = np.arange(8).reshape((2, 2, 2)) assert_equal(b.diagonal(), [[0, 6], [1, 7]]) @@ -2631,6 +2662,7 @@ class TestMethods(object): # Order of axis argument doesn't matter: assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]]) + def test_diagonal_view_notwriteable(self): # this test is only for 1.9, the diagonal view will be # writeable in 1.10. 
@@ -2847,10 +2879,10 @@ class TestMethods(object): assert_(a.flags['OWNDATA']) b = a.copy() # check exceptions - assert_raises(ValueError, a.swapaxes, -5, 0) - assert_raises(ValueError, a.swapaxes, 4, 0) - assert_raises(ValueError, a.swapaxes, 0, -5) - assert_raises(ValueError, a.swapaxes, 0, 4) + assert_raises(np.AxisError, a.swapaxes, -5, 0) + assert_raises(np.AxisError, a.swapaxes, 4, 0) + assert_raises(np.AxisError, a.swapaxes, 0, -5) + assert_raises(np.AxisError, a.swapaxes, 0, 4) for i in range(-4, 4): for j in range(-4, 4): @@ -3309,7 +3341,7 @@ class TestTemporaryElide(object): # def incref_elide_l(d): # return l[4] + l[4] # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide_l - # padding with 1 makes sure the object on the stack is not overwriten + # padding with 1 makes sure the object on the stack is not overwritten l = [1, 1, 1, 1, np.ones(100000)] res = incref_elide_l(l) # the return original should not be changed to an inplace operation @@ -3419,10 +3451,11 @@ class TestPickling(object): assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) def _loads(self, obj): + import pickle if sys.version_info[0] >= 3: - return np.loads(obj, encoding='latin1') + return pickle.loads(obj, encoding='latin1') else: - return np.loads(obj) + return pickle.loads(obj) # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field @@ -3988,7 +4021,7 @@ class TestPutmask(object): for types in np.sctypes.values(): for T in types: if T not in unchecked_types: - yield self.tst_basic, x.copy().astype(T), T, mask, val + self.tst_basic(x.copy().astype(T), T, mask, val) def test_mask_size(self): assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) @@ -4000,7 +4033,7 @@ class TestPutmask(object): def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): - yield self.tst_byteorder, dtype + self.tst_byteorder(dtype) def test_record_array(self): # Note mixed byteorder. 
@@ -4029,7 +4062,7 @@ class TestTake(object): for types in np.sctypes.values(): for T in types: if T not in unchecked_types: - yield self.tst_basic, x.copy().astype(T) + self.tst_basic(x.copy().astype(T)) def test_raise(self): x = np.random.random(24)*100 @@ -4057,7 +4090,7 @@ class TestTake(object): def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): - yield self.tst_byteorder, dtype + self.tst_byteorder(dtype) def test_record_array(self): # Note mixed byteorder. @@ -4443,10 +4476,10 @@ class TestFromBuffer(object): dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7))*5).astype(dt) buf = x.tobytes() - yield self.tst_basic, buf, x.flat, {'dtype':dt} + self.tst_basic(buf, x.flat, {'dtype':dt}) def test_empty(self): - yield self.tst_basic, b'', np.array([]), {} + self.tst_basic(b'', np.array([]), {}) class TestFlat(object): @@ -4490,12 +4523,19 @@ class TestFlat(object): # UPDATEIFCOPY array returned for non-contiguous arrays. assert_(e.flags.writeable is True) assert_(f.flags.writeable is False) - - assert_(c.flags.updateifcopy is False) - assert_(d.flags.updateifcopy is False) - assert_(e.flags.updateifcopy is False) - # UPDATEIFCOPY is removed. - assert_(f.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(c.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(d.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(e.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + # UPDATEIFCOPY is removed. + assert_(f.flags.updateifcopy is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) class TestResize(object): @@ -5655,26 +5695,6 @@ class TestInner(object): assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) -class TestSummarization(object): - def test_1d(self): - A = np.arange(1001) - strA = '[ 0 1 2 ... 
998 999 1000]' - assert_(str(A) == strA) - - reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' - assert_(repr(A) == reprA) - - def test_2d(self): - A = np.arange(1002).reshape(2, 501) - strA = '[[ 0 1 2 ... 498 499 500]\n' \ - ' [ 501 502 503 ... 999 1000 1001]]' - assert_(str(A) == strA) - - reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ - ' [ 501, 502, 503, ..., 999, 1000, 1001]])' - assert_(repr(A) == reprA) - - class TestAlen(object): def test_basic(self): m = np.array([1, 2, 3]) @@ -6451,6 +6471,19 @@ class TestNewBufferProtocol(object): shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) assert_(strides[-1] == 8) + def test_out_of_order_fields(self): + dt = np.dtype(dict( + formats=['<i4', '<i4'], + names=['one', 'two'], + offsets=[4, 0], + itemsize=8 + )) + + # overlapping fields cannot be represented by PEP3118 + arr = np.empty(1, dt) + with assert_raises(ValueError): + memoryview(arr) + class TestArrayAttributeDeletion(object): @@ -6473,7 +6506,7 @@ class TestArrayAttributeDeletion(object): def test_multiarray_flags_writable_attribute_deletion(self): a = np.ones(2).flags - attr = ['updateifcopy', 'aligned', 'writeable'] + attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable'] for s in attr: assert_raises(AttributeError, delattr, a, s) @@ -6689,6 +6722,34 @@ class TestConversion(object): Error = RuntimeError # python < 3.5 assert_raises(Error, bool, self_containing) # previously stack overflow + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array([1])), 1) + assert_equal(int_func(np.array([0])), 0) + assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.unicode_(u'6'))) + + class HasTrunc: + def __trunc__(self): + return 3 + 
assert_equal(3, int_func(np.array(HasTrunc()))) + assert_equal(3, int_func(np.array([HasTrunc()]))) + + class NotConvertible(object): + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) + class TestWhere(object): def test_basic(self): @@ -6924,7 +6985,7 @@ class TestArrayPriority(object): op.ge, op.lt, op.le, op.ne, op.eq ] - # See #7949. Dont use "/" operator With -3 switch, since python reports it + # See #7949. Don't use "/" operator With -3 switch, since python reports it # as a DeprecationWarning if sys.version_info[0] < 3 and not sys.py3kwarning: binary_ops.append(op.div) @@ -7056,21 +7117,14 @@ class TestFormat(object): def test_1d_format(self): # until gh-5543, ensure that the behaviour matches what it used to be a = np.array([np.pi]) - - def ret_and_exc(f, *args, **kwargs): - try: - return f(*args, **kwargs), None - except Exception as e: - # exceptions don't compare equal, so return type and args - # which do - return None, (type(e), e.args) - - # Could switch on python version here, but all we care about is - # that the behaviour hasn't changed - assert_equal( - ret_and_exc(object.__format__, a, '30'), - ret_and_exc('{:30}'.format, a) - ) + if sys.version_info[:2] >= (3, 4): + assert_raises(TypeError, '{:30}'.format, a) + else: + with suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + res = '{:30}'.format(a) + dst = object.__format__(a, '30') + assert_equal(res, dst) class TestCTypes(object): @@ -7094,6 +7148,108 @@ class TestCTypes(object): _internal.ctypes = ctypes +class TestWritebackIfCopy(TestCase): + # all these tests use the WRITEBACKIFCOPY mechanism + def test_argmax_with_out(self): + mat = np.eye(5) + out = np.empty(5, dtype='i2') + res = np.argmax(mat, 0, out=out) + assert_equal(res, range(5)) + + def test_argmin_with_out(self): + mat = -np.eye(5) + out = np.empty(5, 
dtype='i2') + res = np.argmin(mat, 0, out=out) + assert_equal(res, range(5)) + + def test_clip_with_out(self): + mat = np.eye(5) + out = np.eye(5, dtype='i2') + res = np.clip(mat, a_min=-10, a_max=0, out=out) + assert_equal(np.sum(out), 0) + + def test_insert_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_insert + np.place(a, a>2, [44, 55]) + assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) + + def test_put_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + np.put(a, [0, 2], [44, 55]) + assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) + + def test_putmask_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_putmask + np.putmask(a, a>2, a**2) + assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) + + def test_take_mode_raise(self): + a = np.arange(6, dtype='int') + out = np.empty(2, dtype='int') + np.take(a, [0, 2], out=out, mode='raise') + assert_equal(out, np.array([0, 2])) + + def test_choose_mod_raise(self): + a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) + out = np.empty((3,3), dtype='int') + choices = [-10, 10] + np.choose(a, choices, out=out, mode='raise') + assert_equal(out, np.array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]])) + + def test_flatiter__array__(self): + a = np.arange(9).reshape(3,3) + b = a.T.flat + c = b.__array__() + # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics + del c + + def test_dot_out(self): + # if HAVE_CBLAS, will use WRITEBACKIFCOPY + a = np.arange(9, dtype=float).reshape(3,3) + b = np.dot(a, a, out=a) + assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) + + def test_view_assign(self): + from numpy.core.multiarray_tests import npy_create_writebackifcopy, npy_resolve + arr = np.arange(9).reshape(3, 3).T + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[:] = -100 + 
npy_resolve(arr_wb) + assert_equal(arr, -100) + # after resolve, the two arrays no longer reference each other + assert_(not arr_wb.ctypes.data == 0) + arr_wb[:] = 100 + assert_equal(arr, -100) + + +class TestArange(object): + def test_infinite(self): + assert_raises_regex( + ValueError, "size exceeded", + np.arange, 0, np.inf + ) + + def test_nan_step(self): + assert_raises_regex( + ValueError, "cannot compute length", + np.arange, 0, 1, np.nan + ) + + def test_zero_step(self): + assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) + + # empty range + assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + + def test_orderconverter_with_nonASCII_unicode_ordering(): # gh-7475 a = np.arange(5) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 59e11f22e..f3f8706b5 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2146,172 +2146,197 @@ def test_iter_no_broadcast(): assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) -def test_iter_nested_iters_basic(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) +class TestIterNested(object): - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + 
vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) -def test_iter_nested_iters_reorder(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0], [2, 1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - i, j = np.nested_iters(a, [[1, 0], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[2, 0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0], [2, 1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[1, 0], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + i, j = np.nested_iters(a, [[2, 0], 
[1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - i, j = np.nested_iters(a, [[2, 0], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) -def test_iter_nested_iters_flip_axes(): - # Test nested iteration with negative axes - a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0], [1, 2]], 
order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[0, 1], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - i, j = np.nested_iters(a, [[0, 2], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) -def test_iter_nested_iters_broadcast(): - # Test nested iteration with broadcasting - a = arange(2).reshape(2, 1) - b = arange(3).reshape(1, 3) + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) - i, j = np.nested_iters([a, b], [[0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) - i, j = np.nested_iters([a, b], [[1], [0]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + def test_broadcast(self): + # Test nested iteration with broadcasting + a = 
arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) -def test_iter_nested_iters_dtype_copy(): - # Test nested iteration with a copy to change dtype + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # updateifcopy + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i, j, x, y = (None,)*4 # force the updateifcopy + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append([z for z in k]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - # copy - a = arange(6, dtype='i4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readonly', 'copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) - vals = None - - # updateifcopy - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - i, j, x, y = (None,)*4 # force the updateifcopy - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_nested_iters_dtype_buffered(): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] 
+= 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): @@ -2639,28 +2664,6 @@ def test_0d_iter(): assert_equal(vals['d'], 0.5) -def test_0d_nested_iter(): - a = np.arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[], [1, 0, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0, 2], []]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - - i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) - vals = [] - for x in i: - for y in j: - vals.append([z for z in k]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. Dividing by 1024 will keep it small enough to diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index aaf01999c..7c012e9e8 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -904,7 +904,7 @@ class TestTypes(object): fi = np.finfo(dt) assert_(np.can_cast(fi.min, dt)) assert_(np.can_cast(fi.max, dt)) - + # Custom exception class to test exception propagation in fromiter class NIterError(Exception): @@ -1319,7 +1319,7 @@ def assert_array_strict_equal(x, y): assert_(x.flags.writeable == y.flags.writeable) assert_(x.flags.c_contiguous == y.flags.c_contiguous) assert_(x.flags.f_contiguous == y.flags.f_contiguous) - assert_(x.flags.updateifcopy == y.flags.updateifcopy) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) # check endianness assert_(x.dtype.isnative == y.dtype.isnative) @@ -1796,7 +1796,7 @@ class TestAllclose(object): (np.inf, [np.inf])] for (x, y) in data: - yield (self.tst_allclose, x, y) + self.tst_allclose(x, y) def test_ip_not_allclose(self): # Parametric test factory. 
@@ -1817,7 +1817,7 @@ class TestAllclose(object): (np.array([np.inf, 1]), np.array([0, np.inf]))] for (x, y) in data: - yield (self.tst_not_allclose, x, y) + self.tst_not_allclose(x, y) def test_no_parameter_modification(self): x = np.array([np.inf, 1]) @@ -1901,7 +1901,7 @@ class TestIsclose(object): tests = self.some_close_tests results = self.some_close_results for (x, y), result in zip(tests, results): - yield (assert_array_equal, np.isclose(x, y), result) + assert_array_equal(np.isclose(x, y), result) def tst_all_isclose(self, x, y): assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) @@ -1921,19 +1921,19 @@ class TestIsclose(object): def test_ip_all_isclose(self): self.setup() for (x, y) in self.all_close_tests: - yield (self.tst_all_isclose, x, y) + self.tst_all_isclose(x, y) def test_ip_none_isclose(self): self.setup() for (x, y) in self.none_close_tests: - yield (self.tst_none_isclose, x, y) + self.tst_none_isclose(x, y) def test_ip_isclose_allclose(self): self.setup() tests = (self.all_close_tests + self.none_close_tests + self.some_close_tests) for (x, y) in tests: - yield (self.tst_isclose_allclose, x, y) + self.tst_isclose_allclose(x, y) def test_equal_nan(self): assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) @@ -2640,7 +2640,7 @@ class TestRequire(object): fd = [None, 'f8', 'c16'] for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): a = self.generate_all_false(idtype) - yield self.set_and_check_flag, flag, fdtype, a + self.set_and_check_flag(flag, fdtype, a) def test_unknown_requirement(self): a = self.generate_all_false('f8') @@ -2672,7 +2672,7 @@ class TestRequire(object): for flag in self.flag_names: a = ArraySubclass((2, 2)) - yield self.set_and_check_flag, flag, None, a + self.set_and_check_flag(flag, None, a) class TestBroadcast(object): diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py index 4b5c5f81f..6ebb4733c 100644 --- a/numpy/core/tests/test_print.py +++ 
b/numpy/core/tests/test_print.py @@ -40,7 +40,7 @@ def test_float_types(): """ for t in [np.float32, np.double, np.longdouble]: - yield check_float_type, t + check_float_type(t) def check_nan_inf_float(tp): for x in [np.inf, -np.inf, np.nan]: @@ -56,7 +56,7 @@ def test_nan_inf_float(): """ for t in [np.float32, np.double, np.longdouble]: - yield check_nan_inf_float, t + check_nan_inf_float(t) def check_complex_type(tp): for x in [0, 1, -1, 1e20]: @@ -84,7 +84,7 @@ def test_complex_types(): """ for t in [np.complex64, np.cdouble, np.clongdouble]: - yield check_complex_type, t + check_complex_type(t) def test_complex_inf_nan(): """Check inf/nan formatting of complex types.""" @@ -108,7 +108,7 @@ def test_complex_inf_nan(): } for tp in [np.complex64, np.cdouble, np.clongdouble]: for c, s in TESTS.items(): - yield _check_complex_inf_nan, c, s, tp + _check_complex_inf_nan(c, s, tp) def _check_complex_inf_nan(c, s, dtype): assert_equal(str(dtype(c)), s) @@ -164,12 +164,12 @@ def check_complex_type_print(tp): def test_float_type_print(): """Check formatting when using print """ for t in [np.float32, np.double, np.longdouble]: - yield check_float_type_print, t + check_float_type_print(t) def test_complex_type_print(): """Check formatting when using print """ for t in [np.complex64, np.cdouble, np.clongdouble]: - yield check_complex_type_print, t + check_complex_type_print(t) def test_scalar_format(): """Test the str.format method with NumPy scalar types""" diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 27d35fa65..d5423b1f1 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -4,6 +4,7 @@ import sys import collections import pickle import warnings +import textwrap from os import path import numpy as np @@ -101,6 +102,42 @@ class TestFromrecords(object): assert_((mine.data1[i] == 0.0)) assert_((mine.data2[i] == 0.0)) + def test_recarray_repr(self): + a = np.array([(1, 0.1), (2, 0.2)], + dtype=[('foo', 
'<i4'), ('bar', '<f8')]) + a = np.rec.array(a) + assert_equal( + repr(a), + textwrap.dedent("""\ + rec.array([(1, 0.1), (2, 0.2)], + dtype=[('foo', '<i4'), ('bar', '<f8')])""") + ) + + # make sure non-structured dtypes also show up as rec.array + a = np.array(np.ones(4, dtype='f8')) + assert_(repr(np.rec.array(a)).startswith('rec.array')) + + # check that the 'np.record' part of the dtype isn't shown + a = np.rec.array(np.ones(3, dtype='i4,i4')) + assert_equal(repr(a).find('numpy.record'), -1) + a = np.rec.array(np.ones(3, dtype='i4')) + assert_(repr(a).find('dtype=int32') != -1) + + def test_0d_recarray_repr(self): + arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]') + assert_equal(repr(arr_0d), textwrap.dedent("""\ + rec.array((1, 2., '2003'), + dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])""")) + + record = arr_0d[()] + assert_equal(repr(record), "(1, 2., '2003')") + # 1.13 converted to python scalars before the repr + try: + np.set_printoptions(legacy='1.13') + assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))') + finally: + np.set_printoptions(legacy=False) + def test_recarray_from_repr(self): a = np.array([(1,'ABC'), (2, "DEF")], dtype=[('foo', int), ('bar', 'S4')]) @@ -186,17 +223,6 @@ class TestFromrecords(object): assert_equal(arr2.dtype.type, arr.dtype.type) assert_equal(type(arr2), type(arr)) - def test_recarray_repr(self): - # make sure non-structured dtypes also show up as rec.array - a = np.array(np.ones(4, dtype='f8')) - assert_(repr(np.rec.array(a)).startswith('rec.array')) - - # check that the 'np.record' part of the dtype isn't shown - a = np.rec.array(np.ones(3, dtype='i4,i4')) - assert_equal(repr(a).find('numpy.record'), -1) - a = np.rec.array(np.ones(3, dtype='i4')) - assert_(repr(a).find('dtype=int32') != -1) - def test_recarray_from_names(self): ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), @@ -329,6 +355,19 @@ class TestRecord(object): with assert_raises(ValueError): r.setfield([2,3], 
*r.dtype.fields['f']) + def test_out_of_order_fields(self): + # names in the same order, padding added to descr + x = self.data[['col1', 'col2']] + assert_equal(x.dtype.names, ('col1', 'col2')) + assert_equal(x.dtype.descr, + [('col1', '<i4'), ('col2', '<i4'), ('', '|V4')]) + + # names change order to match indexing, as of 1.14 - descr can't + # represent that + y = self.data[['col2', 'col1']] + assert_equal(y.dtype.names, ('col2', 'col1')) + assert_raises(ValueError, lambda: y.dtype.descr) + def test_pickle_1(self): # Issue #1529 a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)]) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index fca3312b9..a3b011454 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -20,6 +20,10 @@ from numpy.testing import ( ) from numpy.compat import asbytes, asunicode, long +try: + RecursionError +except NameError: + RecursionError = RuntimeError # python < 3.5 class TestRegression(object): def test_invalid_round(self): @@ -1683,25 +1687,47 @@ class TestRegression(object): # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) a[()] = a - assert_raises(TypeError, int, a) - assert_raises(TypeError, long, a) - assert_raises(TypeError, float, a) - assert_raises(TypeError, oct, a) - assert_raises(TypeError, hex, a) - + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, long, a) + assert_raises(RecursionError, float, a) + if sys.version_info.major == 2: + # in python 3, this falls back on operator.index, which fails on + # on dtype=object + assert_raises(RecursionError, oct, a) + assert_raises(RecursionError, hex, a) + a[()] = None + + def test_object_array_circular_reference(self): # Test the same for a circular reference. 
- b = np.array(a, dtype=object) + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) a[()] = b - assert_raises(TypeError, int, a) + b[()] = a + assert_raises(RecursionError, int, a) # NumPy has no tp_traverse currently, so circular references # cannot be detected. So resolve it: - a[()] = 0 + a[()] = None # This was causing a to become like the above a = np.array(0, dtype=object) a[...] += 1 assert_equal(a, 1) + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) + assert_equal(long(a), long(0)) + assert_equal(float(a), float(0)) + if sys.version_info.major == 2: + # in python 3, this falls back on operator.index, which fails on + # on dtype=object + assert_equal(oct(a), oct(0)) + assert_equal(hex(a), hex(0)) + + def test_object_array_self_copy(self): # An object array being copied into itself DECREF'ed before INCREF'ing # causing segmentation faults (gh-3787) @@ -2234,6 +2260,15 @@ class TestRegression(object): item2 = copy.copy(item) assert_equal(item, item2) + def test_void_item_memview(self): + va = np.zeros(10, 'V4') + # for now, there is just a futurewarning + assert_warns(FutureWarning, va[:1].item) + # in the future, test we got a bytes copy: + #x = va[:1].item() + #va[0] = b'\xff\xff\xff\xff' + #del va + #assert_equal(x, b'\x00\x00\x00\x00') if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_scalar_ctors.py b/numpy/core/tests/test_scalar_ctors.py index 13de36260..e2470779b 100644 --- a/numpy/core/tests/test_scalar_ctors.py +++ b/numpy/core/tests/test_scalar_ctors.py @@ -1,5 +1,5 @@ """ -Test the scalar contructors, which also do type-coercion +Test the scalar constructors, which also do type-coercion """ from __future__ import division, absolute_import, print_function diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 
d3cdd69dc..7d0be9cf7 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -4,13 +4,14 @@ import sys import warnings import itertools import operator +import platform import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_raises, assert_almost_equal, assert_allclose, assert_array_equal, - IS_PYPY, suppress_warnings, dec, _gen_alignment_data, + IS_PYPY, suppress_warnings, dec, _gen_alignment_data, assert_warns ) types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -420,12 +421,14 @@ class TestConversion(object): assert_raises(OverflowError, x.__int__) assert_equal(len(sup.log), 1) + @dec.knownfailureif(platform.machine().startswith("ppc64")) @dec.skipif(np.finfo(np.double) == np.finfo(np.longdouble)) def test_int_from_huge_longdouble(self): - # produce a longdouble that would overflow a double - exp = np.finfo(np.double).maxexp - huge_ld = 1234 * np.longdouble(2) ** exp - huge_i = 1234 * 2 ** exp + # Produce a longdouble that would overflow a double, + # use exponent that avoids bug in Darwin pow function. + exp = np.finfo(np.double).maxexp - 1 + huge_ld = 2 * 1234 * np.longdouble(2) ** exp + huge_i = 2 * 1234 * 2 ** exp assert_(huge_ld != np.inf) assert_equal(int(huge_ld), huge_i) @@ -537,7 +540,7 @@ class TestRepr(object): # long double test cannot work, because eval goes through a python # float for t in [np.float32, np.float64]: - yield self._test_type_repr, t + self._test_type_repr(t) if not IS_PYPY: @@ -560,16 +563,29 @@ class TestMultiply(object): # numpy integers. And errors are raised when multiplied with others. # Some of this behaviour may be controversial and could be open for # change. 
+ accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = set('?') + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= set('V') # can't default-construct void scalars + for seq_type in (list, tuple): seq = seq_type([1, 2, 3]) - for numpy_type in np.typecodes["AllInteger"]: + for numpy_type in accepted_types: i = np.dtype(numpy_type).type(2) assert_equal(seq * i, seq * int(i)) assert_equal(i * seq, int(i) * seq) - for numpy_type in np.typecodes["All"].replace("V", ""): - if numpy_type in np.typecodes["AllInteger"]: - continue + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + assert_equal( + assert_warns(DeprecationWarning, operator.mul, seq, i), + seq * int(i)) + assert_equal( + assert_warns(DeprecationWarning, operator.mul, i, seq), + int(i) * seq) + + for numpy_type in forbidden_types: i = np.dtype(numpy_type).type() assert_raises(TypeError, operator.mul, seq, i) assert_raises(TypeError, operator.mul, i, seq) diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py index ba1843bca..164ab06c7 100644 --- a/numpy/core/tests/test_scalarprint.py +++ b/numpy/core/tests/test_scalarprint.py @@ -26,6 +26,25 @@ class TestRealScalars(object): msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) assert_equal(str(styp(val)), want, err_msg=msg) + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. 
Note that in python2 + # the str has truncated digits, but we do not do this + def check(v): + # we compare str to repr, to avoid python2 truncation behavior + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), repr(v)) + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + def test_dragon4(self): # these tests are adapted from Ryan Juckett's dragon4 implementation, # see dragon4.c for details. diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index 5c1e569b7..deb2a407d 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -560,6 +560,28 @@ class TestBlock(object): assert_raises_regex(TypeError, 'tuple', np.block, ([1, 2], [3, 4])) assert_raises_regex(TypeError, 'tuple', np.block, [(1, 2), (3, 4)]) + def test_different_ndims(self): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = np.block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self): + a = 1. 
+ b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = np.block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index bebeddc92..ac97b8b0d 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -2203,6 +2203,105 @@ class TestChoose(object): assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) +class TestRationalFunctions(object): + def test_lcm(self): + self._test_lcm_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_lcm_object(self): + self._test_lcm_inner(np.object_) + + def test_gcd(self): + self._test_gcd_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_gcd_object(self): + self._test_gcd_inner(np.object_) + + def _test_lcm_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.lcm(a, b), [60, 600]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.lcm(a, b), [60]*4) + + # reduce + a = np.array([3, 12, 20], dtype=dtype) + assert_equal(np.lcm.reduce([3, 12, 20]), 60) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20]) + + def _test_gcd_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.gcd(a, b), [4, 40]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.gcd(a, b), [4]*4) + + # reduce + a = np.array([15, 25, 35], dtype=dtype) + assert_equal(np.gcd.reduce(a), 5) + + # broadcasting, and a 
test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) + + def test_lcm_overflow(self): + # verify that we don't overflow when a*b does overflow + big = np.int32(np.iinfo(np.int32).max // 11) + a = 2*big + b = 5*big + assert_equal(np.lcm(a, b), 10*big) + + def test_gcd_overflow(self): + for dtype in (np.int32, np.int64): + # verify that we don't overflow when taking abs(x) + # not relevant for lcm, where the result is unrepresentable anyway + a = dtype(np.iinfo(dtype).min) # negative power of two + q = -(a // 4) + assert_equal(np.gcd(a, q*3), q) + assert_equal(np.gcd(a, -q*3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_builtin_long(self): + # sanity check that array coercion is alright for builtin longs + assert_equal(np.array(2**200).item(), 2**200) + + # expressed as prime factors + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + assert_equal(np.gcd(2**100, 3**100), 1) + + def is_longdouble_finfo_bogus(): info = np.finfo(np.longcomplex) return not np.isfinite(np.log10(info.tiny/info.eps)) @@ -2236,53 +2335,53 @@ class TestComplexFunctions(object): def test_branch_cuts(self): # check branch cuts and continuity on them - yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True - yield 
_check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) - yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True - yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True - yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) - yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True - yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True - yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) # check against bogus branch cuts: assert continuity between quadrants - yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1 - yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1 - yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1 + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) - yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1 - yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1 - yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1 + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) def 
test_branch_cuts_complex64(self): # check branch cuts and continuity on them - yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64 - yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64 - yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64 - yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64 - yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64 + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) - yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64 - yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64 - yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64 + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) - yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64 - yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64 - yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64 + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) # check against bogus branch cuts: assert continuity between quadrants - yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64 - yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64 - yield 
_check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64 + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) - yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64 - yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64 - yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64 + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) def test_against_cmath(self): import cmath @@ -2390,7 +2489,7 @@ class TestComplexFunctions(object): def test_loss_of_precision(self): for dtype in [np.complex64, np.complex_]: - yield self.check_loss_of_precision, dtype + self.check_loss_of_precision(dtype) @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo") def test_loss_of_precision_longcomplex(self): |
