Diffstat (limited to 'numpy')
121 files changed, 1487 insertions, 1304 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py index 2b8d41798..e1df236bb 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -133,17 +133,9 @@ else: from .version import git_revision as __git_revision__ from .version import version as __version__ - from ._import_tools import PackageLoader - - def pkgload(*packages, **options): - loader = PackageLoader(infunc=True) - return loader(*packages, **options) - __all__ = ['ModuleDeprecationWarning', 'VisibleDeprecationWarning'] - pkgload.__doc__ = PackageLoader.__call__.__doc__ - # Allow distributors to run custom init code from . import _distributor_init @@ -172,8 +164,7 @@ else: from .core import round, abs, max, min - __all__.extend(['__version__', 'pkgload', 'PackageLoader', - 'show_config']) + __all__.extend(['__version__', 'show_config']) __all__.extend(core.__all__) __all__.extend(_mat.__all__) __all__.extend(lib.__all__) diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py deleted file mode 100644 index cb8bc477c..000000000 --- a/numpy/_import_tools.py +++ /dev/null @@ -1,351 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import warnings - -__all__ = ['PackageLoader'] - -class PackageLoader(object): - def __init__(self, verbose=False, infunc=False): - """ Manages loading packages. - """ - - if infunc: - _level = 2 - else: - _level = 1 - self.parent_frame = frame = sys._getframe(_level) - self.parent_name = eval('__name__', frame.f_globals, frame.f_locals) - parent_path = eval('__path__', frame.f_globals, frame.f_locals) - if isinstance(parent_path, str): - parent_path = [parent_path] - self.parent_path = parent_path - if '__all__' not in frame.f_locals: - exec('__all__ = []', frame.f_globals, frame.f_locals) - self.parent_export_names = eval('__all__', frame.f_globals, frame.f_locals) - - self.info_modules = {} - self.imported_packages = [] - self.verbose = None - - def _get_info_files(self, package_dir, parent_path, parent_package=None): - """ Return list of (package name,info.py file) from parent_path subdirectories. - """ - from glob import glob - files = glob(os.path.join(parent_path, package_dir, 'info.py')) - for info_file in glob(os.path.join(parent_path, package_dir, 'info.pyc')): - if info_file[:-1] not in files: - files.append(info_file) - info_files = [] - for info_file in files: - package_name = os.path.dirname(info_file[len(parent_path)+1:])\ - .replace(os.sep, '.') - if parent_package: - package_name = parent_package + '.' + package_name - info_files.append((package_name, info_file)) - info_files.extend(self._get_info_files('*', - os.path.dirname(info_file), - package_name)) - return info_files - - def _init_info_modules(self, packages=None): - """Initialize info_modules = {<package_name>: <package info.py module>}. - """ - from numpy.compat import npy_load_module - info_files = [] - info_modules = self.info_modules - - if packages is None: - for path in self.parent_path: - info_files.extend(self._get_info_files('*', path)) - else: - for package_name in packages: - package_dir = os.path.join(*package_name.split('.')) - for path in self.parent_path: - names_files = self._get_info_files(package_dir, path) - if names_files: - info_files.extend(names_files) - break - else: - try: - exec('import %s.info as info' % (package_name)) - info_modules[package_name] = info - except ImportError as msg: - self.warn('No scipy-style subpackage %r found in %s. 
'\ - 'Ignoring: %s'\ - % (package_name, ':'.join(self.parent_path), msg)) - - for package_name, info_file in info_files: - if package_name in info_modules: - continue - fullname = self.parent_name +'.'+ package_name - if info_file[-1]=='c': - filedescriptor = ('.pyc', 'rb', 2) - else: - filedescriptor = ('.py', 'U', 1) - - try: - info_module = npy_load_module(fullname + '.info', - info_file, - filedescriptor) - except Exception as msg: - self.error(msg) - info_module = None - - if info_module is None or getattr(info_module, 'ignore', False): - info_modules.pop(package_name, None) - else: - self._init_info_modules(getattr(info_module, 'depends', [])) - info_modules[package_name] = info_module - - return - - def _get_sorted_names(self): - """ Return package names sorted in the order as they should be - imported due to dependence relations between packages. - """ - - depend_dict = {} - for name, info_module in self.info_modules.items(): - depend_dict[name] = getattr(info_module, 'depends', []) - package_names = [] - - for name in list(depend_dict.keys()): - if not depend_dict[name]: - package_names.append(name) - del depend_dict[name] - - while depend_dict: - for name, lst in list(depend_dict.items()): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - package_names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - - return package_names - - def __call__(self,*packages, **options): - """Load one or more packages into parent package top-level namespace. - - This function is intended to shorten the need to import many - subpackages, say of scipy, constantly with statements such as - - import scipy.linalg, scipy.fftpack, scipy.etc... - - Instead, you can say: - - import scipy - scipy.pkgload('linalg','fftpack',...) - - or - - scipy.pkgload() - - to load all of them in one call. - - If a name which doesn't exist in scipy's namespace is - given, a warning is shown. - - Parameters - ---------- - *packages : arg-tuple - the names (one or more strings) of all the modules one - wishes to load into the top-level namespace. - verbose= : integer - verbosity level [default: -1]. - verbose=-1 will suspend also warnings. - force= : bool - when True, force reloading loaded packages [default: False]. 
- postpone= : bool - when True, don't load packages [default: False] - - """ - # 2014-10-29, 1.10 - warnings.warn('pkgload and PackageLoader are obsolete ' - 'and will be removed in a future version of numpy', - DeprecationWarning, stacklevel=2) - frame = self.parent_frame - self.info_modules = {} - if options.get('force', False): - self.imported_packages = [] - self.verbose = verbose = options.get('verbose', -1) - postpone = options.get('postpone', None) - self._init_info_modules(packages or None) - - self.log('Imports to %r namespace\n----------------------------'\ - % self.parent_name) - - for package_name in self._get_sorted_names(): - if package_name in self.imported_packages: - continue - info_module = self.info_modules[package_name] - global_symbols = getattr(info_module, 'global_symbols', []) - postpone_import = getattr(info_module, 'postpone_import', False) - if (postpone and not global_symbols) \ - or (postpone_import and postpone is not None): - continue - - old_object = frame.f_locals.get(package_name, None) - - cmdstr = 'import '+package_name - if self._execcmd(cmdstr): - continue - self.imported_packages.append(package_name) - - if verbose!=-1: - new_object = frame.f_locals.get(package_name) - if old_object is not None and old_object is not new_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (package_name, self._obj2repr(new_object), - self._obj2repr(old_object))) - - if '.' not in package_name: - self.parent_export_names.append(package_name) - - for symbol in global_symbols: - if symbol=='*': - symbols = eval('getattr(%s,"__all__",None)'\ - % (package_name), - frame.f_globals, frame.f_locals) - if symbols is None: - symbols = eval('dir(%s)' % (package_name), - frame.f_globals, frame.f_locals) - symbols = [s for s in symbols if not s.startswith('_')] - else: - symbols = [symbol] - - if verbose!=-1: - old_objects = {} - for s in symbols: - if s in frame.f_locals: - old_objects[s] = frame.f_locals[s] - - cmdstr = 'from '+package_name+' import '+symbol - if self._execcmd(cmdstr): - continue - - if verbose!=-1: - for s, old_object in old_objects.items(): - new_object = frame.f_locals[s] - if new_object is not old_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (s, self._obj2repr(new_object), - self._obj2repr(old_object))) - - if symbol=='*': - self.parent_export_names.extend(symbols) - else: - self.parent_export_names.append(symbol) - - return - - def _execcmd(self, cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - try: - exec (cmdstr, frame.f_globals, frame.f_locals) - except Exception as msg: - self.error('%s -> failed: %s' % (cmdstr, msg)) - return True - else: - self.log('%s -> success' % (cmdstr)) - return - - def _obj2repr(self, obj): - """ Return repr(obj) with""" - module = getattr(obj, '__module__', None) - file = getattr(obj, '__file__', None) - if module is not None: - return repr(obj) + ' from ' + module - if file is not None: - return repr(obj) + ' from ' + file - return repr(obj) - - def log(self, mess): - if self.verbose>1: - print(str(mess), file=sys.stderr) - def warn(self, mess): - if self.verbose>=0: - print(str(mess), file=sys.stderr) - def error(self, mess): - if self.verbose!=-1: - print(str(mess), file=sys.stderr) - - def _get_doc_title(self, info_module): - """ Get the title from a package info.py file. 
- """ - title = getattr(info_module, '__doc_title__', None) - if title is not None: - return title - title = getattr(info_module, '__doc__', None) - if title is not None: - title = title.lstrip().split('\n', 1)[0] - return title - return '* Not Available *' - - def _format_titles(self,titles,colsep='---'): - display_window_width = 70 # How to determine the correct value in runtime?? - lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0] - max_length = max(lengths) - lines = [] - for (name, title) in titles: - name = name[name.find('.')+1:] - w = max_length - len(name) - words = title.split() - line = '%s%s %s' % (name, w*' ', colsep) - tab = len(line) * ' ' - while words: - word = words.pop(0) - if len(line)+len(word)>display_window_width: - lines.append(line) - line = tab - line += ' ' + word - lines.append(line) - return '\n'.join(lines) - - def get_pkgdocs(self): - """ Return documentation summary of subpackages. - """ - import sys - self.info_modules = {} - self._init_info_modules(None) - - titles = [] - symbols = [] - for package_name, info_module in self.info_modules.items(): - global_symbols = getattr(info_module, 'global_symbols', []) - fullname = self.parent_name +'.'+ package_name - note = '' - if fullname not in sys.modules: - note = ' [*]' - titles.append((fullname, self._get_doc_title(info_module) + note)) - if global_symbols: - symbols.append((package_name, ', '.join(global_symbols))) - - retstr = self._format_titles(titles) +\ - '\n [*] - using a package requires explicit import (see pkgload)' - - - if symbols: - retstr += """\n\nGlobal symbols from subpackages"""\ - """\n-------------------------------\n""" +\ - self._format_titles(symbols, '-->') - - return retstr - -class PackageLoaderDebug(PackageLoader): - def _execcmd(self, cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - print('Executing', repr(cmdstr), '...', end=' ') - sys.stdout.flush() - exec (cmdstr, frame.f_globals, frame.f_locals) - print('ok') - sys.stdout.flush() - return - -if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')): - PackageLoader = PackageLoaderDebug diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index b65920fde..9ebd12cbd 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -1454,11 +1454,10 @@ add_newdoc('numpy.core.multiarray', 'arange', Values are generated within the half-open interval ``[start, stop)`` (in other words, the interval including `start` but excluding `stop`). For integer arguments the function is equivalent to the Python built-in - `range <https://docs.python.org/library/functions.html#func-range>`_ function, - but returns an ndarray rather than a list. + `range` function, but returns an ndarray rather than a list. When using a non-integer step, such as 0.1, the results will often not - be consistent. It is better to use ``linspace`` for these cases. + be consistent. It is better to use `numpy.linspace` for these cases. Parameters ---------- @@ -2843,40 +2842,19 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', ----- Below are the public attributes of this object which were documented in "Guide to NumPy" (we have omitted undocumented public attributes, - as well as documented private attributes): - - * data: A pointer to the memory area of the array as a Python integer. - This memory area may contain data that is not aligned, or not in correct - byte-order. The memory area may not even be writeable. 
The array - flags and data-type of this array should be respected when passing this - attribute to arbitrary C-code to avoid trouble that can include Python - crashing. User Beware! The value of this attribute is exactly the same - as self._array_interface_['data'][0]. - - * shape (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the C-integer corresponding to dtype('p') on this - platform. This base-type could be c_int, c_long, or c_longlong - depending on the platform. The c_intp type is defined accordingly in - numpy.ctypeslib. The ctypes array contains the shape of the underlying - array. - - * strides (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the same as for the shape attribute. This ctypes array - contains the strides information from the underlying array. This strides - information is important for showing how many bytes must be jumped to - get to the next element in the array. - - * data_as(obj): Return the data pointer cast to a particular c-types object. - For example, calling self._as_parameter_ is equivalent to - self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a - pointer to a ctypes array of floating-point data: - self.data_as(ctypes.POINTER(ctypes.c_double)). - - * shape_as(obj): Return the shape tuple as an array of some other c-types - type. For example: self.shape_as(ctypes.c_short). - - * strides_as(obj): Return the strides tuple as an array of some other - c-types type. For example: self.strides_as(ctypes.c_longlong). + as well as documented private attributes): + + .. autoattribute:: numpy.core._internal._ctypes.data + + .. autoattribute:: numpy.core._internal._ctypes.shape + + .. autoattribute:: numpy.core._internal._ctypes.strides + + .. automethod:: numpy.core._internal._ctypes.data_as + + .. automethod:: numpy.core._internal._ctypes.shape_as + + .. automethod:: numpy.core._internal._ctypes.strides_as Be careful using the ctypes attribute - especially on temporary arrays or arrays constructed on the fly. For example, calling @@ -7158,10 +7136,10 @@ add_newdoc('numpy.core.multiarray', 'datetime_data', array(250, dtype='timedelta64[s]') The result can be used to construct a datetime that uses the same units - as a timedelta:: + as a timedelta >>> np.datetime64('2010', np.datetime_data(dt_25s)) - numpy.datetime64('2010-01-01T00:00:00','25s') + numpy.datetime64('2010-01-01T00:00:00', '25s') """) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 9990bacf0..48ede14d0 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -9,7 +9,7 @@ from __future__ import division, absolute_import, print_function import re import sys -from numpy.compat import basestring +from numpy.compat import basestring, unicode from .multiarray import dtype, array, ndarray try: import ctypes @@ -257,33 +257,72 @@ class _ctypes(object): self._zerod = False def data_as(self, obj): + """ + Return the data pointer cast to a particular c-types object. + For example, calling ``self._as_parameter_`` is equivalent to + ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a + pointer to a ctypes array of floating-point data: + ``self.data_as(ctypes.POINTER(ctypes.c_double))``. + """ return self._ctypes.cast(self._data, obj) def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. 
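The hunk above replaces the long inline description of the ``ctypes`` attribute with autodoc references, and the matching docstrings are added to ``numpy.core._internal._ctypes`` further down in this diff. As a rough illustration of the interface those docstrings describe (not part of the patch; the commented values assume a C-contiguous float64 array):

    import ctypes
    import numpy as np

    a = np.ascontiguousarray(np.arange(6.0).reshape(2, 3))

    # Raw address of the first element, the same integer as
    # a.__array_interface__['data'][0].
    addr = a.ctypes.data

    # Data pointer cast to double*; only valid while `a` is alive.
    ptr = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    print(ptr[0], ptr[5])            # 0.0 5.0

    # Shape and strides as ctypes arrays of the platform's c_intp.
    print(tuple(a.ctypes.shape))     # (2, 3)
    print(tuple(a.ctypes.strides))   # (24, 8)

    # The same tuples re-cast to another integer type.
    print(tuple(a.ctypes.shape_as(ctypes.c_long)))   # (2, 3)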
+ """ if self._zerod: return None return (obj*self._arr.ndim)(*self._arr.shape) def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ if self._zerod: return None return (obj*self._arr.ndim)(*self._arr.strides) def get_data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in correct + byte-order. The memory area may not even be writeable. The array + flags and data-type of this array should be respected when passing this + attribute to arbitrary C-code to avoid trouble that can include Python + crashing. User Beware! The value of this attribute is exactly the same + as ``self._array_interface_['data'][0]``. + """ return self._data def get_shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or + `ctypes.c_longlong` depending on the platform. + The c_intp type is defined accordingly in `numpy.ctypeslib`. + The ctypes array contains the shape of the underlying array. + """ return self.shape_as(_getintp_ctype()) def get_strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes array + contains the strides information from the underlying array. This strides + information is important for showing how many bytes must be jumped to + get to the next element in the array. + """ return self.strides_as(_getintp_ctype()) def get_as_parameter(self): return self._ctypes.c_void_p(self._data) - data = property(get_data, None, doc="c-types data") - shape = property(get_shape, None, doc="c-types shape") - strides = property(get_strides, None, doc="c-types strides") + data = property(get_data) + shape = property(get_shape) + strides = property(get_strides) _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") @@ -294,7 +333,7 @@ def _newnames(datatype, order): """ oldnames = datatype.names nameslist = list(oldnames) - if isinstance(order, str): + if isinstance(order, (str, unicode)): order = [order] seen = set() if isinstance(order, (list, tuple)): @@ -444,46 +483,46 @@ _pep3118_standard_map = { } _pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) -def _dtype_from_pep3118(spec): - class Stream(object): - def __init__(self, s): - self.s = s - self.byteorder = '@' +class _Stream(object): + def __init__(self, s): + self.s = s + self.byteorder = '@' - def advance(self, n): - res = self.s[:n] - self.s = self.s[n:] - return res + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res - def consume(self, c): - if self.s[:len(c)] == c: - self.advance(len(c)) - return True - return False - - def consume_until(self, c): - if callable(c): - i = 0 - while i < len(self.s) and not c(self.s[i]): - i = i + 1 - return self.advance(i) - else: - i = self.s.index(c) - res = self.advance(i) - self.advance(len(c)) - return res + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False - @property - def next(self): - return self.s[0] + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res - def __bool__(self): 
- return bool(self.s) - __nonzero__ = __bool__ + @property + def next(self): + return self.s[0] - stream = Stream(spec) + def __bool__(self): + return bool(self.s) + __nonzero__ = __bool__ + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) return dtype diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index b4aeaa277..7f2541667 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -46,11 +46,11 @@ static int _import_array(void) { int st; - PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); PyObject *c_api = NULL; if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); + PyErr_SetString(PyExc_ImportError, "numpy.core._multiarray_umath failed to import"); return -1; } c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); diff --git a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py index 3bcf137f7..1b0143e88 100644 --- a/numpy/core/code_generators/generate_ufunc_api.py +++ b/numpy/core/code_generators/generate_ufunc_api.py @@ -36,11 +36,12 @@ static void **PyUFunc_API=NULL; static NPY_INLINE int _import_umath(void) { - PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); PyObject *c_api = NULL; if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); + PyErr_SetString(PyExc_ImportError, + "numpy.core._multiarray_umath failed to import"); return -1; } c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 632bcb41f..6dc01877b 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -1080,6 +1080,8 @@ def make_code(funcdict, filename): Please make changes to the code generator program (%s) **/ #include "cpuid.h" + #include "ufunc_object.h" + #include "ufunc_type_resolution.h" %s static int diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index 163f125c2..5b8689235 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -1319,8 +1319,8 @@ def einsum(*operands, **kwargs): """ - # Grab non-einsum kwargs; never optimize 2-argument case. - optimize_arg = kwargs.pop('optimize', len(operands) > 3) + # Grab non-einsum kwargs; do not optimize by default. + optimize_arg = kwargs.pop('optimize', False) # If no optimization, run pure einsum if optimize_arg is False: diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index fb72bada5..799b1418d 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -71,7 +71,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None): -------- arange : Similar to `linspace`, but uses a step size (instead of the number of samples). - logspace : Samples uniformly distributed in log space. + geomspace : Similar to `linspace`, but with numbers spaced evenly on a log + scale (a geometric progression). + logspace : Similar to `geomspace`, but with the end points specified as + logarithms. 
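The ``See Also`` entries added just above distinguish three closely related functions; a small sketch of that distinction (not part of the patch):

    import numpy as np

    # Evenly spaced on a linear scale.
    np.linspace(1, 1000, num=4)    # array([   1.,  334.,  667., 1000.])

    # Evenly spaced on a log scale; the end points are given directly.
    np.geomspace(1, 1000, num=4)   # array([   1.,   10.,  100., 1000.])

    # The same geometric progression, but the end points are given as
    # exponents of the base (base**0 ... base**3).
    np.logspace(0, 3, num=4)       # array([   1.,   10.,  100., 1000.])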
Examples -------- diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py new file mode 100644 index 000000000..8b07bccf2 --- /dev/null +++ b/numpy/core/multiarray.py @@ -0,0 +1,34 @@ +""" +Create the numpy.core.multiarray namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. +""" + +from . import _multiarray_umath +from numpy.core._multiarray_umath import * +from numpy.core._multiarray_umath import (_fastCopyAndTranspose, _flagdict, _insert, + _reconstruct, _vec_string, _ARRAY_API, _monotonicity) + +__all__ = ['_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', + '_fastCopyAndTranspose', '_flagdict', '_insert', '_reconstruct', + '_vec_string', '_monotonicity', + 'add_docstring', 'arange', 'array', 'bincount', 'broadcast', 'busday_count', + 'busday_offset', 'busdaycalendar', 'can_cast', 'compare_chararrays', + 'concatenate', 'copyto', 'correlate', 'correlate2', 'count_nonzero', + 'c_einsum', 'datetime_as_string', 'datetime_data', 'digitize', 'dot', + 'dragon4_positional', 'dragon4_scientific', 'dtype', 'empty', 'empty_like', + 'error', 'flagsobj', 'flatiter', 'format_longfloat', 'frombuffer', + 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner', 'int_asbuffer', + 'interp', 'interp_complex', 'is_busday', 'lexsort', 'matmul', + 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'newbuffer', 'normalize_axis_index', 'packbits', 'promote_types', + 'putmask', 'ravel_multi_index', 'result_type', 'scalar', + 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops', + 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt', + 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', + 'where', 'zeros'] + diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index e5570791a..1b4818b76 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2850,16 +2850,11 @@ class errstate(object): Notes ----- - The ``with`` statement was introduced in Python 2.5, and can only be used - there by importing it: ``from __future__ import with_statement``. In - earlier Python versions the ``with`` statement is not available. - For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- - >>> from __future__ import with_statement # use 'with' in Python 2.5 >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. @@ -2919,10 +2914,7 @@ True_ = bool_(True) def extend_all(module): existing = set(__all__) - try: - mall = getattr(module, '__all__') - except AttributeError: - mall = [k for k in module.__dict__.keys() if not k.startswith('_')] + mall = getattr(module, '__all__') for a in mall: if a not in existing: __all__.append(a) diff --git a/numpy/core/records.py b/numpy/core/records.py index 612d39322..a483871ba 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -42,7 +42,7 @@ import warnings from . import numeric as sb from . 
import numerictypes as nt -from numpy.compat import isfileobj, bytes, long +from numpy.compat import isfileobj, bytes, long, unicode from .arrayprint import get_printoptions # All of the functions allow formats to be a dtype @@ -174,7 +174,7 @@ class format_parser(object): if (names): if (type(names) in [list, tuple]): pass - elif isinstance(names, str): + elif isinstance(names, (str, unicode)): names = names.split(',') else: raise NameError("illegal input names %s" % repr(names)) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index f826b278f..b306aa4e8 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -514,9 +514,9 @@ def configuration(parent_package='',top_path=None): def generate_numpyconfig_h(ext, build_dir): """Depends on config.h: generate_config_h has to be called before !""" - # put private include directory in build_dir on search path + # put common include directory in build_dir on search path # allows using code generation in headers headers - config.add_include_dirs(join(build_dir, "src", "private")) + config.add_include_dirs(join(build_dir, "src", "common")) config.add_include_dirs(join(build_dir, "src", "npymath")) target = join(build_dir, header_dir, '_numpyconfig.h') @@ -603,7 +603,7 @@ def configuration(parent_package='',top_path=None): generate_numpy_api = generate_api_func('generate_numpy_api') generate_ufunc_api = generate_api_func('generate_ufunc_api') - config.add_include_dirs(join(local_dir, "src", "private")) + config.add_include_dirs(join(local_dir, "src", "common")) config.add_include_dirs(join(local_dir, "src")) config.add_include_dirs(join(local_dir)) @@ -700,9 +700,9 @@ def configuration(parent_package='',top_path=None): npysort_sources = [join('src', 'npysort', 'quicksort.c.src'), join('src', 'npysort', 'mergesort.c.src'), join('src', 'npysort', 'heapsort.c.src'), - join('src', 'private', 'npy_partition.h.src'), + join('src', 'common', 'npy_partition.h.src'), join('src', 'npysort', 'selection.c.src'), - join('src', 'private', 'npy_binsearch.h.src'), + join('src', 'common', 'npy_binsearch.h.src'), join('src', 'npysort', 'binsearch.c.src'), ] config.add_library('npysort', @@ -710,16 +710,55 @@ def configuration(parent_package='',top_path=None): include_dirs=[]) ####################################################################### - # multiarray module # + # _multiarray_umath module - common part # + ####################################################################### + + common_deps = [ + join('src', 'common', 'array_assign.h'), + join('src', 'common', 'binop_override.h'), + join('src', 'common', 'cblasfuncs.h'), + join('src', 'common', 'lowlevel_strided_loops.h'), + join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_config.h'), + join('src', 'common', 'npy_extint128.h'), + join('src', 'common', 'npy_longdouble.h'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.h'), + join('src', 'common', 'ufunc_override.h'), + join('src', 'common', 'umathmodule.h'), + ] + + common_src = [ + join('src', 'common', 'array_assign.c'), + join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_longdouble.c'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.c'), + join('src', 'common', 'ufunc_override.c'), + ] + + blas_info = get_info('blas_opt', 0) + if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []): + extra_info = blas_info + # These files are also in MANIFEST.in so that they are always in + # the source distribution independently 
of HAVE_CBLAS. + common_src.extend([join('src', 'common', 'cblasfuncs.c'), + join('src', 'common', 'python_xerbla.c'), + ]) + if uses_accelerate_framework(blas_info): + common_src.extend(get_sgemv_fix()) + else: + extra_info = {} + + ####################################################################### + # _multiarray_umath module - multiarray part # ####################################################################### multiarray_deps = [ join('src', 'multiarray', 'arrayobject.h'), join('src', 'multiarray', 'arraytypes.h'), - join('src', 'multiarray', 'array_assign.h'), join('src', 'multiarray', 'buffer.h'), join('src', 'multiarray', 'calculation.h'), - join('src', 'multiarray', 'cblasfuncs.h'), join('src', 'multiarray', 'common.h'), join('src', 'multiarray', 'convert_datatype.h'), join('src', 'multiarray', 'convert.h'), @@ -742,17 +781,8 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'shape.h'), join('src', 'multiarray', 'strfuncs.h'), join('src', 'multiarray', 'typeinfo.h'), - join('src', 'multiarray', 'ucsnarrow.h'), join('src', 'multiarray', 'usertypes.h'), join('src', 'multiarray', 'vdot.h'), - join('src', 'private', 'npy_config.h'), - join('src', 'private', 'templ_common.h.src'), - join('src', 'private', 'lowlevel_strided_loops.h'), - join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_longdouble.h'), - join('src', 'private', 'ufunc_override.h'), - join('src', 'private', 'binop_override.h'), - join('src', 'private', 'npy_extint128.h'), join('include', 'numpy', 'arrayobject.h'), join('include', 'numpy', '_neighborhood_iterator_imp.h'), join('include', 'numpy', 'npy_endian.h'), @@ -778,7 +808,6 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'alloc.c'), join('src', 'multiarray', 'arrayobject.c'), join('src', 'multiarray', 'arraytypes.c.src'), - join('src', 'multiarray', 'array_assign.c'), join('src', 'multiarray', 'array_assign_scalar.c'), join('src', 'multiarray', 'array_assign_array.c'), join('src', 'multiarray', 'buffer.c'), @@ -821,40 +850,11 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'temp_elide.c'), join('src', 'multiarray', 'typeinfo.c'), join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'ucsnarrow.c'), join('src', 'multiarray', 'vdot.c'), - join('src', 'private', 'templ_common.h.src'), - join('src', 'private', 'mem_overlap.c'), - join('src', 'private', 'npy_longdouble.c'), - join('src', 'private', 'ufunc_override.c'), ] - blas_info = get_info('blas_opt', 0) - if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []): - extra_info = blas_info - # These files are also in MANIFEST.in so that they are always in - # the source distribution independently of HAVE_CBLAS. 
- multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'), - join('src', 'multiarray', 'python_xerbla.c'), - ]) - if uses_accelerate_framework(blas_info): - multiarray_src.extend(get_sgemv_fix()) - else: - extra_info = {} - - config.add_extension('multiarray', - sources=multiarray_src + - [generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir, 'generate_numpy_api.py'), - join('*.py')], - depends=deps + multiarray_deps, - libraries=['npymath', 'npysort'], - extra_info=extra_info) - ####################################################################### - # umath module # + # _multiarray_umath module - umath part # ####################################################################### def generate_umath_c(ext, build_dir): @@ -883,34 +883,34 @@ def configuration(parent_package='',top_path=None): join('src', 'umath', 'scalarmath.c.src'), join('src', 'umath', 'ufunc_type_resolution.c'), join('src', 'umath', 'override.c'), - join('src', 'private', 'mem_overlap.c'), - join('src', 'private', 'npy_longdouble.c'), - join('src', 'private', 'ufunc_override.c')] + ] umath_deps = [ generate_umath_py, join('include', 'numpy', 'npy_math.h'), join('include', 'numpy', 'halffloat.h'), join('src', 'multiarray', 'common.h'), - join('src', 'private', 'templ_common.h.src'), + join('src', 'common', 'templ_common.h.src'), join('src', 'umath', 'simd.inc.src'), join('src', 'umath', 'override.h'), join(codegen_dir, 'generate_ufunc_api.py'), - join('src', 'private', 'lowlevel_strided_loops.h'), - join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_longdouble.h'), - join('src', 'private', 'ufunc_override.h'), - join('src', 'private', 'binop_override.h')] + npymath_sources - - config.add_extension('umath', - sources=umath_src + + ] + + config.add_extension('_multiarray_umath', + sources=multiarray_src + umath_src + + npymath_sources + common_src + [generate_config_h, - generate_numpyconfig_h, - generate_umath_c, - generate_ufunc_api], - depends=deps + umath_deps, - libraries=['npymath'], - ) + generate_numpyconfig_h, + generate_numpy_api, + join(codegen_dir, 'generate_numpy_api.py'), + join('*.py'), + generate_umath_c, + generate_ufunc_api, + ], + depends=deps + multiarray_deps + umath_deps + + common_deps, + libraries=['npymath', 'npysort'], + extra_info=extra_info) ####################################################################### # umath_tests module # @@ -939,9 +939,9 @@ def configuration(parent_package='',top_path=None): config.add_extension('_multiarray_tests', sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), - join('src', 'private', 'mem_overlap.c')], - depends=[join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_extint128.h')], + join('src', 'common', 'mem_overlap.c')], + depends=[join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_extint128.h')], libraries=['npymath']) ####################################################################### diff --git a/numpy/core/src/multiarray/array_assign.c b/numpy/core/src/common/array_assign.c index a48e245d8..a48e245d8 100644 --- a/numpy/core/src/multiarray/array_assign.c +++ b/numpy/core/src/common/array_assign.c diff --git a/numpy/core/src/multiarray/array_assign.h b/numpy/core/src/common/array_assign.h index 3fecff007..3fecff007 100644 --- a/numpy/core/src/multiarray/array_assign.h +++ b/numpy/core/src/common/array_assign.h diff --git a/numpy/core/src/private/binop_override.h b/numpy/core/src/common/binop_override.h index 47df63e38..47df63e38 100644 --- 
a/numpy/core/src/private/binop_override.h +++ b/numpy/core/src/common/binop_override.h diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c index 6460c5db1..6460c5db1 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/common/cblasfuncs.c diff --git a/numpy/core/src/multiarray/cblasfuncs.h b/numpy/core/src/common/cblasfuncs.h index 66ce4ca5b..66ce4ca5b 100644 --- a/numpy/core/src/multiarray/cblasfuncs.h +++ b/numpy/core/src/common/cblasfuncs.h diff --git a/numpy/core/src/private/get_attr_string.h b/numpy/core/src/common/get_attr_string.h index bec87c5ed..bec87c5ed 100644 --- a/numpy/core/src/private/get_attr_string.h +++ b/numpy/core/src/common/get_attr_string.h diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h index f9c671f77..f9c671f77 100644 --- a/numpy/core/src/private/lowlevel_strided_loops.h +++ b/numpy/core/src/common/lowlevel_strided_loops.h diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/common/mem_overlap.c index 21db1893b..21db1893b 100644 --- a/numpy/core/src/private/mem_overlap.c +++ b/numpy/core/src/common/mem_overlap.c diff --git a/numpy/core/src/private/mem_overlap.h b/numpy/core/src/common/mem_overlap.h index 8044f1663..8044f1663 100644 --- a/numpy/core/src/private/mem_overlap.h +++ b/numpy/core/src/common/mem_overlap.h diff --git a/numpy/core/src/private/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src index ce3b34b0e..ce3b34b0e 100644 --- a/numpy/core/src/private/npy_binsearch.h.src +++ b/numpy/core/src/common/npy_binsearch.h.src diff --git a/numpy/core/src/private/npy_cblas.h b/numpy/core/src/common/npy_cblas.h index a083f3bcc..a083f3bcc 100644 --- a/numpy/core/src/private/npy_cblas.h +++ b/numpy/core/src/common/npy_cblas.h diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/common/npy_config.h index 8143e7719..8143e7719 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/common/npy_config.h diff --git a/numpy/core/src/private/npy_extint128.h b/numpy/core/src/common/npy_extint128.h index a887ff317..a887ff317 100644 --- a/numpy/core/src/private/npy_extint128.h +++ b/numpy/core/src/common/npy_extint128.h diff --git a/numpy/core/src/private/npy_fpmath.h b/numpy/core/src/common/npy_fpmath.h index dbb3fb23d..dbb3fb23d 100644 --- a/numpy/core/src/private/npy_fpmath.h +++ b/numpy/core/src/common/npy_fpmath.h diff --git a/numpy/core/src/private/npy_import.h b/numpy/core/src/common/npy_import.h index 221e1e645..221e1e645 100644 --- a/numpy/core/src/private/npy_import.h +++ b/numpy/core/src/common/npy_import.h diff --git a/numpy/core/src/private/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c index 508fbceac..508fbceac 100644 --- a/numpy/core/src/private/npy_longdouble.c +++ b/numpy/core/src/common/npy_longdouble.c diff --git a/numpy/core/src/private/npy_longdouble.h b/numpy/core/src/common/npy_longdouble.h index 036b53070..036b53070 100644 --- a/numpy/core/src/private/npy_longdouble.h +++ b/numpy/core/src/common/npy_longdouble.h diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src index a22cf911c..a22cf911c 100644 --- a/numpy/core/src/private/npy_partition.h.src +++ b/numpy/core/src/common/npy_partition.h.src diff --git a/numpy/core/src/private/npy_pycompat.h b/numpy/core/src/common/npy_pycompat.h index aa0b5c122..aa0b5c122 100644 --- a/numpy/core/src/private/npy_pycompat.h +++ b/numpy/core/src/common/npy_pycompat.h diff --git 
a/numpy/core/src/private/npy_sort.h b/numpy/core/src/common/npy_sort.h index 8c6f05623..8c6f05623 100644 --- a/numpy/core/src/private/npy_sort.h +++ b/numpy/core/src/common/npy_sort.h diff --git a/numpy/core/src/multiarray/python_xerbla.c b/numpy/core/src/common/python_xerbla.c index bdf0b9058..bdf0b9058 100644 --- a/numpy/core/src/multiarray/python_xerbla.c +++ b/numpy/core/src/common/python_xerbla.c diff --git a/numpy/core/src/private/templ_common.h.src b/numpy/core/src/common/templ_common.h.src index a65a00758..a65a00758 100644 --- a/numpy/core/src/private/templ_common.h.src +++ b/numpy/core/src/common/templ_common.h.src diff --git a/numpy/core/src/multiarray/ucsnarrow.c b/numpy/core/src/common/ucsnarrow.c index 8e293e9f2..8e293e9f2 100644 --- a/numpy/core/src/multiarray/ucsnarrow.c +++ b/numpy/core/src/common/ucsnarrow.c diff --git a/numpy/core/src/multiarray/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h index fe31a5e25..fe31a5e25 100644 --- a/numpy/core/src/multiarray/ucsnarrow.h +++ b/numpy/core/src/common/ucsnarrow.h diff --git a/numpy/core/src/private/ufunc_override.c b/numpy/core/src/common/ufunc_override.c index 33b54c665..33b54c665 100644 --- a/numpy/core/src/private/ufunc_override.c +++ b/numpy/core/src/common/ufunc_override.c diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/common/ufunc_override.h index 5b269d270..5b269d270 100644 --- a/numpy/core/src/private/ufunc_override.h +++ b/numpy/core/src/common/ufunc_override.h diff --git a/numpy/core/src/common/umathmodule.h b/numpy/core/src/common/umathmodule.h new file mode 100644 index 000000000..6998596ee --- /dev/null +++ b/numpy/core/src/common/umathmodule.h @@ -0,0 +1,8 @@ +#include "__umath_generated.c" +#include "__ufunc_api.c" + +PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args); +PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)); +int initumath(PyObject *m); + + diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index b4158ec8e..d622effe6 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -733,7 +733,7 @@ VOID_getitem(void *input, void *vap) return (PyObject *)ret; } - return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize); + return PyBytes_FromStringAndSize(ip, descr->elsize); } diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index 21dbdefd6..c8e3da8bc 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -175,6 +175,14 @@ _is_natively_aligned_at(PyArray_Descr *descr, return 1; } +/* + * Fill in str with an appropriate PEP 3118 format string, based on + * descr. For structured dtypes, calls itself recursively. Each call extends + * str at offset then updates offset, and uses descr->byteorder, (and + * possibly the byte order in obj) to determine the byte-order char. 
+ * + * Returns 0 for success, -1 for failure + */ static int _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, PyObject* obj, Py_ssize_t *offset, @@ -195,8 +203,8 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, PyObject *item, *subarray_tuple; Py_ssize_t total_count = 1; Py_ssize_t dim_size; + Py_ssize_t old_offset; char buf[128]; - int old_offset; int ret; if (PyTuple_Check(descr->subarray->shape)) { @@ -230,15 +238,15 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, return ret; } else if (PyDataType_HASFIELDS(descr)) { - int base_offset = *offset; + Py_ssize_t base_offset = *offset; _append_str(str, "T{"); for (k = 0; k < PyTuple_GET_SIZE(descr->names); ++k) { PyObject *name, *item, *offset_obj, *tmp; PyArray_Descr *child; char *p; - Py_ssize_t len; - int new_offset; + Py_ssize_t len, new_offset; + int ret; name = PyTuple_GET_ITEM(descr->names, k); item = PyDict_GetItem(descr->fields, name); @@ -266,8 +274,11 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, } /* Insert child item */ - _buffer_format_string(child, str, obj, offset, + ret = _buffer_format_string(child, str, obj, offset, active_byteorder); + if (ret < 0) { + return -1; + } /* Insert field name */ #if defined(NPY_PY3K) @@ -393,8 +404,8 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, case NPY_CFLOAT: if (_append_str(str, "Zf")) return -1; break; case NPY_CDOUBLE: if (_append_str(str, "Zd")) return -1; break; case NPY_CLONGDOUBLE: if (_append_str(str, "Zg")) return -1; break; - /* XXX: datetime */ - /* XXX: timedelta */ + /* XXX NPY_DATETIME */ + /* XXX NPY_TIMEDELTA */ case NPY_OBJECT: if (_append_char(str, 'O')) return -1; break; case NPY_STRING: { char buf[128]; @@ -468,10 +479,33 @@ _buffer_info_new(PyObject *obj) info = malloc(sizeof(_buffer_info_t)); if (info == NULL) { + PyErr_NoMemory(); goto fail; } - if (PyArray_IsScalar(obj, Generic)) { + if (PyArray_IsScalar(obj, Datetime) || PyArray_IsScalar(obj, Timedelta)) { + /* + * Special case datetime64 scalars to remain backward compatible. + * This will change in a future version. + * Note arrays of datetime64 and strutured arrays with datetime64 + * fields will not hit this code path and are currently unsupported + * in _buffer_format_string. 
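The comment being added above describes a backward-compatibility special case for datetime64 and timedelta64 scalars in the PEP 3118 buffer code. A minimal sketch of the behaviour it implies, assuming a build that includes this change:

    import numpy as np

    dt = np.datetime64('2010-01-01', 's')

    # Datetime64/timedelta64 *scalars* export their payload as 8 raw bytes
    # (format 'B', itemsize 1); arrays of datetime64 still go through
    # _buffer_format_string and remain unsupported there.
    m = memoryview(dt)
    print(m.format, m.itemsize, m.shape)   # expected: B 1 (8,)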
+ */ + _append_char(&fmt, 'B'); + _append_char(&fmt, '\0'); + info->ndim = 1; + info->shape = malloc(sizeof(Py_ssize_t) * 2); + if (info->shape == NULL) { + PyErr_NoMemory(); + goto fail; + } + info->strides = info->shape + info->ndim; + info->shape[0] = 8; + info->strides[0] = 1; + info->format = fmt.s; + return info; + } + else if (PyArray_IsScalar(obj, Generic)) { descr = PyArray_DescrFromScalar(obj); if (descr == NULL) { goto fail; @@ -493,6 +527,7 @@ _buffer_info_new(PyObject *obj) else { info->shape = malloc(sizeof(Py_ssize_t) * PyArray_NDIM(arr) * 2 + 1); if (info->shape == NULL) { + PyErr_NoMemory(); goto fail; } info->strides = info->shape + PyArray_NDIM(arr); @@ -796,8 +831,6 @@ gentype_getbuffer(PyObject *self, Py_buffer *view, int flags) /* Fill in information */ info = _buffer_get_info(self); if (info == NULL) { - PyErr_SetString(PyExc_BufferError, - "could not get scalar buffer information"); goto fail; } @@ -820,6 +853,9 @@ gentype_getbuffer(PyObject *self, Py_buffer *view, int flags) } #endif view->len = elsize; + if (PyArray_IsScalar(self, Datetime) || PyArray_IsScalar(self, Timedelta)) { + elsize = 1; /* descr->elsize,char is 8,'M', but we return 1,'B' */ + } view->itemsize = elsize; Py_DECREF(descr); diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index e47dd81b9..90ee2c5b2 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -5,6 +5,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" +#include "lowlevel_strided_loops.h" #include "npy_config.h" @@ -1102,7 +1103,18 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o if (out == newin) { outgood = 1; } - if (!outgood && PyArray_ISONESEGMENT(out) && + + + /* make sure the shape of the output array is the same */ + if (!PyArray_SAMESHAPE(newin, out)) { + PyErr_SetString(PyExc_ValueError, "clip: Output array must have the" + "same shape as the input."); + goto fail; + } + + if (!outgood && PyArray_EQUIVALENTLY_ITERABLE( + self, out, PyArray_TRIVIALLY_ITERABLE_OP_READ, + PyArray_TRIVIALLY_ITERABLE_OP_NOREAD) && PyArray_CHKFLAGS(out, NPY_ARRAY_ALIGNED) && PyArray_ISNOTSWAPPED(out) && PyArray_EquivTypes(PyArray_DESCR(out), indescr)) { @@ -1111,15 +1123,19 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o /* * Do we still not have a suitable output array? - * Create one, now + * Create one, now. No matter why the array is not suitable a copy has + * to be made. This may be just to avoid memory overlap though. 
*/ if (!outgood) { int oflags; - if (PyArray_ISFORTRAN(out)) + if (PyArray_ISFORTRAN(self)) { oflags = NPY_ARRAY_FARRAY; - else + } + else { oflags = NPY_ARRAY_CARRAY; - oflags |= NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST; + } + oflags |= (NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST | + NPY_ARRAY_ENSURECOPY); Py_INCREF(indescr); newout = (PyArrayObject*)PyArray_FromArray(out, indescr, oflags); if (newout == NULL) { @@ -1131,13 +1147,6 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o Py_INCREF(newout); } - /* make sure the shape of the output array is the same */ - if (!PyArray_SAMESHAPE(newin, newout)) { - PyErr_SetString(PyExc_ValueError, "clip: Output array must have the" - "same shape as the input."); - goto fail; - } - /* Now we can call the fast-clip function */ min_data = max_data = NULL; if (mina != NULL) { diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 938850997..f1b8a0209 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -92,6 +92,7 @@ swab_separator(const char *sep) s = start = malloc(strlen(sep)+3); if (s == NULL) { + PyErr_NoMemory(); return NULL; } /* add space to front if there isn't one */ @@ -1390,10 +1391,12 @@ _array_from_buffer_3118(PyObject *memoryview) if (!is_ctypes) { /* This object has no excuse for a broken PEP3118 buffer */ - PyErr_SetString( + PyErr_Format( PyExc_RuntimeError, - "Item size computed from the PEP 3118 buffer format " - "string does not match the actual item size."); + "Item size %zd for PEP 3118 buffer format " + "string %s does not match the dtype %c item size %d.", + view->itemsize, view->format, descr->type, + descr->elsize); Py_DECREF(descr); return NULL; } diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index a0dc98f0e..1d44cf8be 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -2401,7 +2401,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) if (ret == NULL) { return NULL; } - mod = PyImport_ImportModule("numpy.core.multiarray"); + mod = PyImport_ImportModule("numpy.core._multiarray_umath"); if (mod == NULL) { Py_DECREF(ret); return NULL; diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index abbf05220..14dfa71c2 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -114,7 +114,7 @@ LogBase2_64(npy_uint64 val) return LogBase2_32((npy_uint32)val); } -#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) +#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || defined(HAVE_LDOUBLE_IEEE_QUAD_BE) static npy_uint32 LogBase2_128(npy_uint64 hi, npy_uint64 lo) { @@ -217,7 +217,8 @@ BigInt_Set_uint64(BigInt *i, npy_uint64 val) #if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) || \ - defined(HAVE_LDOUBLE_IEEE_QUAD_LE)) + defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || \ + defined(HAVE_LDOUBLE_IEEE_QUAD_BE)) static void BigInt_Set_2x_uint64(BigInt *i, npy_uint64 hi, npy_uint64 lo) { @@ -2845,7 +2846,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( #if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)) /* - * IBM extended precision 128-bit floating-point format, aka IBM double-dobule + * IBM extended precision 128-bit floating-point format, aka IBM double-double * * IBM's double-double type is a pair of IEEE binary64 values, which you add * together to get a total value. 
The exponents are arranged so that the lower @@ -2882,12 +2883,15 @@ Dragon4_PrintFloat_IEEE_binary128_be( */ static npy_uint32 Dragon4_PrintFloat_IBM_double_double( - Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt) + Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) { char *buffer = scratch->repr; npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; + FloatVal128 val128; + FloatUnion128 buf128; + npy_uint32 floatExponent1, floatExponent2; npy_uint64 floatMantissa1, floatMantissa2; npy_uint32 floatSign1, floatSign2; @@ -2908,6 +2912,12 @@ Dragon4_PrintFloat_IBM_double_double( return 0; } + /* The high part always comes before the low part, regardless of the + * endianness of the system. */ + buf128.floatingPoint = *value; + val128.hi = buf128.integer.a; + val128.lo = buf128.integer.b; + /* deconstruct the floating point values */ floatMantissa1 = val128.hi & bitmask_u64(52); floatExponent1 = (val128.hi >> 52) & bitmask_u32(11); @@ -3052,39 +3062,6 @@ Dragon4_PrintFloat_IBM_double_double( signbit, mantissaBit, hasUnequalMargins, opt); } -#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) -static npy_uint32 -Dragon4_PrintFloat_IBM_double_double_le( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) -{ - FloatVal128 val128; - FloatUnion128 buf128; - - buf128.floatingPoint = *value; - val128.lo = buf128.integer.a; - val128.hi = buf128.integer.b; - - return Dragon4_PrintFloat_IBM_double_double(scratch, val128, opt); -} -#endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE */ - -#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) -static npy_uint32 -Dragon4_PrintFloat_IBM_double_double_be( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) -{ - FloatVal128 val128; - FloatUnion128 buf128; - - buf128.floatingPoint = *value; - val128.hi = buf128.integer.a; - val128.lo = buf128.integer.b; - - return Dragon4_PrintFloat_IBM_double_double(scratch, val128, opt); -} - -#endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE */ - #endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE | HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE */ #endif /* NPY_FLOAT128 */ diff --git a/numpy/core/src/multiarray/dragon4.h b/numpy/core/src/multiarray/dragon4.h index 383a0949d..2b8b4cef4 100644 --- a/numpy/core/src/multiarray/dragon4.h +++ b/numpy/core/src/multiarray/dragon4.h @@ -75,10 +75,9 @@ #define NPY_LONGDOUBLE_BINFMT_NAME Intel_extended128 #elif defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) #define NPY_LONGDOUBLE_BINFMT_NAME Motorola_extended96 -#elif defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) - #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double_le -#elif defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) - #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double_be +#elif (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ + defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)) + #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double #else #error No long double representation defined #endif diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index f338226c2..2fdb3ebf6 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -2915,20 +2915,20 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, Py_INCREF(extra_op_dtype); mit->extra_op_dtype = extra_op_dtype; - /* Create an iterator, just to broadcast the arrays?! 
*/ - tmp_iter = NpyIter_MultiNew(mit->numiter, index_arrays, - NPY_ITER_ZEROSIZE_OK | - NPY_ITER_REFS_OK | - NPY_ITER_MULTI_INDEX | - NPY_ITER_DONT_NEGATE_STRIDES, - NPY_KEEPORDER, - NPY_UNSAFE_CASTING, - tmp_op_flags, NULL); - if (tmp_iter == NULL) { - goto fail; - } - if (PyArray_SIZE(subspace) == 1) { + /* Create an iterator, just to broadcast the arrays?! */ + tmp_iter = NpyIter_MultiNew(mit->numiter, index_arrays, + NPY_ITER_ZEROSIZE_OK | + NPY_ITER_REFS_OK | + NPY_ITER_MULTI_INDEX | + NPY_ITER_DONT_NEGATE_STRIDES, + NPY_KEEPORDER, + NPY_UNSAFE_CASTING, + tmp_op_flags, NULL); + if (tmp_iter == NULL) { + goto fail; + } + /* * nditer allows itemsize with npy_intp type, so it works * here, but it would *not* work directly, since elsize @@ -2941,6 +2941,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, "internal error: failed to find output array strides"); goto fail; } + NpyIter_Deallocate(tmp_iter); } else { /* Just use C-order strides (TODO: allow also F-order) */ @@ -2950,7 +2951,6 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, stride *= mit->dimensions[i]; } } - NpyIter_Deallocate(tmp_iter); /* shape is set, and strides is set up to mit->nd, set rest */ PyArray_CreateSortedStridePerm(PyArray_NDIM(subspace), diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 2e836d1d0..3d2cce5e1 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1566,7 +1566,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args)) if (ret == NULL) { return NULL; } - mod = PyImport_ImportModule("numpy.core.multiarray"); + mod = PyImport_ImportModule("numpy.core._multiarray_umath"); if (mod == NULL) { Py_DECREF(ret); return NULL; diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 6e57f1d6d..8f782cff6 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -19,6 +19,7 @@ #include "structmember.h" #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE #define _MULTIARRAYMODULE #include <numpy/npy_common.h> #include "numpy/arrayobject.h" @@ -54,7 +55,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "ctors.h" #include "array_assign.h" #include "common.h" -#include "ufunc_override.h" #include "multiarraymodule.h" #include "cblasfuncs.h" #include "vdot.h" @@ -67,6 +67,17 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "get_attr_string.h" /* + ***************************************************************************** + ** INCLUDE GENERATED CODE ** + ***************************************************************************** + */ +#include "funcs.inc" +#include "loops.h" +#include "umathmodule.h" + +NPY_NO_EXPORT int initscalarmath(PyObject *); + +/* * global variable to determine if legacy printing is enabled, accessible from * C. For simplicity the mode is encoded as an integer where '0' means no * legacy mode, and '113' means 1.13 legacy mode. We can upgrade this if we @@ -2020,7 +2031,7 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds if (DEPRECATE( "The binary mode of fromstring is deprecated, as it behaves " "surprisingly on unicode inputs. 
Use frombuffer instead") < 0) { - Py_DECREF(descr); + Py_XDECREF(descr); return NULL; } } @@ -4365,6 +4376,18 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, METH_VARARGS, NULL}, + /* from umath */ + {"frompyfunc", + (PyCFunction) ufunc_frompyfunc, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"seterrobj", + (PyCFunction) ufunc_seterr, + METH_VARARGS, NULL}, + {"geterrobj", + (PyCFunction) ufunc_geterr, + METH_VARARGS, NULL}, + {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, + METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4382,9 +4405,6 @@ static struct PyMethodDef array_module_methods[] = { static int setup_scalartypes(PyObject *NPY_UNUSED(dict)) { - initialize_casting_tables(); - initialize_numeric_types(); - if (PyType_Ready(&PyBool_Type) < 0) { return -1; } @@ -4624,7 +4644,7 @@ intern_strings(void) #if defined(NPY_PY3K) static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "multiarray", + "_multiarray_umath", NULL, -1, array_module_methods, @@ -4638,10 +4658,10 @@ static struct PyModuleDef moduledef = { /* Initialization function for the module */ #if defined(NPY_PY3K) #define RETVAL(x) x -PyMODINIT_FUNC PyInit_multiarray(void) { +PyMODINIT_FUNC PyInit__multiarray_umath(void) { #else #define RETVAL(x) -PyMODINIT_FUNC initmultiarray(void) { +PyMODINIT_FUNC init_multiarray_umath(void) { #endif PyObject *m, *d, *s; PyObject *c_api; @@ -4650,7 +4670,7 @@ PyMODINIT_FUNC initmultiarray(void) { #if defined(NPY_PY3K) m = PyModule_Create(&moduledef); #else - m = Py_InitModule("multiarray", array_module_methods); + m = Py_InitModule("_multiarray_umath", array_module_methods); #endif if (!m) { goto err; @@ -4684,6 +4704,17 @@ PyMODINIT_FUNC initmultiarray(void) { * static structure slots with functions from the Python C_API. 
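The multiarraymodule.c hunks above fold the umath entry points (frompyfunc, seterrobj, geterrobj, _add_newdoc_ufunc) into the array module's method table and rename the extension to _multiarray_umath. A minimal sketch of the user-visible effect, assuming a build of this branch; the name `succ` is purely illustrative:

    import pickle
    import numpy as np

    # frompyfunc is now registered on the merged extension, but the public
    # entry point is unchanged
    succ = np.frompyfunc(lambda x: x + 1, 1, 1)
    print(succ(np.arange(3)))            # array([1, 2, 3], dtype=object)

    # array pickles now reference numpy.core._multiarray_umath internally;
    # round-tripping is unchanged from the user's side
    a = np.arange(4).reshape(2, 2)
    assert (pickle.loads(pickle.dumps(a)) == a).all()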
*/ PyArray_Type.tp_hash = PyObject_HashNotImplemented; + + /* Load the ufunc operators into the array module's namespace */ + if (InitOperators(d) < 0) { + goto err; + } + + initialize_casting_tables(); + initialize_numeric_types(); + if(initscalarmath(m) < 0) + goto err; + if (PyType_Ready(&PyArray_Type) < 0) { goto err; } @@ -4730,6 +4761,16 @@ PyMODINIT_FUNC initmultiarray(void) { PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); + c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL); + if (c_api == NULL) { + goto err; + } + PyDict_SetItemString(d, "_UFUNC_API", c_api); + Py_DECREF(c_api); + if (PyErr_Occurred()) { + goto err; + } + /* * PyExc_Exception should catch all the standard errors that are * now raised instead of the string exception "multiarray.error" @@ -4806,7 +4847,9 @@ PyMODINIT_FUNC initmultiarray(void) { if (set_typeinfo(d) != 0) { goto err; } - + if (initumath(m) != 0) { + goto err; + } return RETVAL(m); err: diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index a32aa47ab..fdd4d7878 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -538,19 +538,22 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen, } static PyObject * +_void_scalar_repr(PyObject *obj) { + static PyObject *reprfunc = NULL; + npy_cache_import("numpy.core.arrayprint", + "_void_scalar_repr", &reprfunc); + if (reprfunc == NULL) { + return NULL; + } + return PyObject_CallFunction(reprfunc, "O", obj); +} + +static PyObject * voidtype_repr(PyObject *self) { PyVoidScalarObject *s = (PyVoidScalarObject*) self; if (PyDataType_HASFIELDS(s->descr)) { - static PyObject *reprfunc = NULL; - - npy_cache_import("numpy.core.arrayprint", - "_void_scalar_repr", &reprfunc); - if (reprfunc == NULL) { - return NULL; - } - - return PyObject_CallFunction(reprfunc, "O", self); + return _void_scalar_repr(self); } return _void_to_hex(s->obval, s->descr->elsize, "void(b'", "\\x", "')"); } @@ -560,15 +563,7 @@ voidtype_str(PyObject *self) { PyVoidScalarObject *s = (PyVoidScalarObject*) self; if (PyDataType_HASFIELDS(s->descr)) { - static PyObject *reprfunc = NULL; - - npy_cache_import("numpy.core.arrayprint", - "_void_scalar_repr", &reprfunc); - if (reprfunc == NULL) { - return NULL; - } - - return PyObject_CallFunction(reprfunc, "O", self); + return _void_scalar_repr(self); } return _void_to_hex(s->obval, s->descr->elsize, "b'", "\\x", "'"); } @@ -1875,7 +1870,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) } #endif - mod = PyImport_ImportModule("numpy.core.multiarray"); + mod = PyImport_ImportModule("numpy.core._multiarray_umath"); if (mod == NULL) { return NULL; } diff --git a/numpy/core/src/umath/cpuid.c b/numpy/core/src/umath/cpuid.c index 912d51eeb..6744ceb05 100644 --- a/numpy/core/src/umath/cpuid.c +++ b/numpy/core/src/umath/cpuid.c @@ -1,13 +1,11 @@ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include <Python.h> #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "cpuid.h" #define XCR_XFEATURE_ENABLED_MASK 0x0 diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c index 188054e22..aea1815e8 100644 --- a/numpy/core/src/umath/extobj.c +++ b/numpy/core/src/umath/extobj.c @@ -1,13 +1,11 @@ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include <Python.h> #include "npy_config.h" -#define 
PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "npy_pycompat.h" #include "extobj.h" diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index e988792a2..66b69f555 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1,14 +1,12 @@ /* -*- c -*- */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "numpy/npy_common.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -1874,9 +1872,13 @@ NPY_NO_EXPORT void } else { BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; + @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2; + in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2; + if (npy_isnan(in1)) { + npy_set_floatstatus_invalid(); + } + *((@type@ *)op1) = in1; } } } diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 8136d7b3f..6d04ce372 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -7,15 +7,13 @@ * See LICENSE.txt for the license. */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define PY_SSIZE_T_CLEAN #include <Python.h> #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include <numpy/arrayobject.h> #include "npy_config.h" diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 3e29c4b4e..e98d9f865 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -7,13 +7,11 @@ */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 20c448d8b..459b0a594 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -24,15 +24,13 @@ * */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "npy_pycompat.h" #include "numpy/arrayobject.h" diff --git a/numpy/core/src/umath/ufunc_object.h b/numpy/core/src/umath/ufunc_object.h index 5438270f1..f5de9f9b7 100644 --- a/numpy/core/src/umath/ufunc_object.h +++ b/numpy/core/src/umath/ufunc_object.h @@ -1,6 +1,8 @@ #ifndef _NPY_UMATH_UFUNC_OBJECT_H_ #define _NPY_UMATH_UFUNC_OBJECT_H_ +#include <numpy/ufuncobject.h> + NPY_NO_EXPORT PyObject * ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args); diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 1766ba564..807b03512 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -9,14 +9,12 @@ * See LICENSE.txt for the license. 
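The loops.c.src hunk above makes the NaN-propagating branch of minimum/maximum raise the invalid floating-point flag. A small sketch of the resulting behaviour under the default error state (this mirrors the test_minimize_warns test added later in this diff); illustrative only:

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        np.minimum(np.nan, 1.0)          # NaN propagates and flags "invalid"
    assert any(issubclass(i.category, RuntimeWarning) for i in w)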
*/ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "npy_pycompat.h" #include "numpy/ufuncobject.h" diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 9291a5138..20bd2b0a8 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -16,12 +16,12 @@ * __ufunc_api.c */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -30,20 +30,6 @@ #include "numpy/npy_math.h" -/* - ***************************************************************************** - ** INCLUDE GENERATED CODE ** - ***************************************************************************** - */ -#include "funcs.inc" -#include "loops.h" -#include "ufunc_object.h" -#include "ufunc_type_resolution.h" -#include "__umath_generated.c" -#include "__ufunc_api.c" - -NPY_NO_EXPORT int initscalarmath(PyObject *); - static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; static int @@ -82,7 +68,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc, return 0; } -static PyObject * +PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { /* Keywords are ignored for now */ @@ -179,7 +165,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS } /* docstring in numpy.add_newdocs.py */ -static PyObject * +PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyUFuncObject *ufunc; @@ -270,97 +256,24 @@ intern_strings(void) npy_um_str_array_wrap && npy_um_str_array_finalize && npy_um_str_ufunc; } -/* Setup the umath module */ -/* Remove for time being, it is declared in __ufunc_api.h */ -/*static PyTypeObject PyUFunc_Type;*/ - -static struct PyMethodDef methods[] = { - {"frompyfunc", - (PyCFunction) ufunc_frompyfunc, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"seterrobj", - (PyCFunction) ufunc_seterr, - METH_VARARGS, NULL}, - {"geterrobj", - (PyCFunction) ufunc_geterr, - METH_VARARGS, NULL}, - {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "umath", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#include <stdio.h> +/* Setup the umath part of the module */ -#if defined(NPY_PY3K) -#define RETVAL(x) x -PyMODINIT_FUNC PyInit_umath(void) -#else -#define RETVAL(x) -PyMODINIT_FUNC initumath(void) -#endif +int initumath(PyObject *m) { - PyObject *m, *d, *s, *s2, *c_api; + PyObject *d, *s, *s2; int UFUNC_FLOATING_POINT_SUPPORT = 1; #ifdef NO_UFUNC_FLOATING_POINT_SUPPORT UFUNC_FLOATING_POINT_SUPPORT = 0; #endif - /* Create the module and add the functions */ -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("umath", methods); -#endif - if (!m) { - goto err; - } - - /* Import the array */ - if (_import_array() < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, - "umath failed: Could not import array core."); - } - goto err; - } /* Initialize the types */ if (PyType_Ready(&PyUFunc_Type) < 0) - goto err; + return -1; /* Add some symbolic constants to the 
module */ d = PyModule_GetDict(m); - c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL); - if (PyErr_Occurred()) { - goto err; - } - PyDict_SetItemString(d, "_UFUNC_API", c_api); - Py_DECREF(c_api); - if (PyErr_Occurred()) { - goto err; - } - - /* Load the ufunc operators into the array module's namespace */ - if (InitOperators(d) < 0) { - goto err; - } - PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI)); Py_DECREF(s); PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(NPY_E)); @@ -417,19 +330,11 @@ PyMODINIT_FUNC initumath(void) PyDict_SetItemString(d, "conj", s); PyDict_SetItemString(d, "mod", s2); - initscalarmath(m); - if (!intern_strings()) { - goto err; - } - - return RETVAL(m); - - err: - /* Check for errors */ - if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, - "cannot load umath module."); + "cannot intern umath strings while initializing _multiarray_umath."); + return -1; } - return RETVAL(NULL); + + return 0; } diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 6214e325c..6522c6e8a 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -8,6 +8,7 @@ import pytest import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, + assert_raises_regex, ) import textwrap @@ -210,6 +211,15 @@ class TestArray2String(object): assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + def test_format_function(self): """Test custom format function for each element in array.""" def _format_function(x): diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 942554cae..8e058d5fb 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -620,6 +620,10 @@ class TestDateTime(object): assert_equal(pickle.loads(pickle.dumps(dt)), dt) dt = np.dtype('M8[W]') assert_equal(pickle.loads(pickle.dumps(dt)), dt) + scalar = np.datetime64('2016-01-01T00:00:00.000000000') + assert_equal(pickle.loads(pickle.dumps(scalar)), scalar) + delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000') + assert_equal(pickle.loads(pickle.dumps(delta)), delta) # Check that loading pickles from 1.6 works pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ @@ -1698,7 +1702,6 @@ class TestDateTime(object): assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'), np.datetime64('NaT')) - def test_datetime_busdaycalendar(self): # Check that it removes NaT, duplicates, and weekends # and sorts the result. 
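The test_datetime.py addition above pins down pickling of datetime64 and timedelta64 scalars, which now round-trip through the renamed extension module. A minimal illustration, using the same values as the test:

    import pickle
    import numpy as np

    scalar = np.datetime64('2016-01-01T00:00:00.000000000')
    delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')

    assert pickle.loads(pickle.dumps(scalar)) == scalar
    assert pickle.loads(pickle.dumps(delta)) == delta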
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 5d66d963f..10ef16800 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -518,3 +518,8 @@ class TestPositiveOnNonNumerical(_DeprecationTestCase): # 2018-06-28, 1.16.0 def test_positive_on_non_number(self): self.assert_deprecated(operator.pos, args=(np.array('foo'),)) + +class TestFromstring(_DeprecationTestCase): + # 2017-10-19, 1.14 + def test_fromstring(self): + self.assert_deprecated(np.fromstring, args=('\x00'*80,)) diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index 8ce374a75..6b5b9c06e 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ -965,7 +965,6 @@ class TestEinsumPath(object): path, path_str = np.einsum_path(*edge_test4, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) - def test_path_type_input(self): # Test explicit path handeling path_test = self.build_operands('dcc,fce,ea,dbf->ab') diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index 276cd9f93..1934d542a 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -194,7 +194,6 @@ class TestIndexing(object): assert_raises(IndexError, arr.__getitem__, (slice(None), index)) - def test_boolean_indexing_onedim(self): # Indexing a 2-dimensional array with # boolean array of length one diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 1511f5b6b..1c59abaa7 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -108,7 +108,6 @@ class TestFlags(object): assert_equal(self.a.flags['X'], False) assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) - def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) assert_(a.flags.aligned) @@ -2729,7 +2728,6 @@ class TestMethods(object): # Order of axis argument doesn't matter: assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]]) - def test_diagonal_view_notwriteable(self): # this test is only for 1.9, the diagonal view will be # writeable in 1.10. 
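TestFromstring above covers the deprecation of np.fromstring's binary mode (introduced in the multiarraymodule.c hunk earlier in this diff). A short sketch of the intended migration, with frombuffer as the supported replacement:

    import warnings
    import numpy as np

    data = np.arange(4, dtype=np.int32).tobytes()

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        np.fromstring(data, dtype=np.int32)       # binary mode: deprecated
    assert any(issubclass(i.category, DeprecationWarning) for i in w)

    # supported replacement
    assert (np.frombuffer(data, dtype=np.int32) == np.arange(4)).all()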
@@ -4386,7 +4384,6 @@ class TestIO(object): d.tofile(f) assert_equal(os.path.getsize(self.filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self): # gh-6632 self.x.tofile(self.filename) @@ -4748,55 +4745,72 @@ class TestRecord(object): # Error raised when multiple fields have the same name assert_raises(ValueError, test_assign) - if sys.version_info[0] >= 3: - def test_bytes_fields(self): - # Bytes are not allowed in field names and not recognized in titles - # on Py3 - assert_raises(TypeError, np.dtype, [(b'a', int)]) - assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) - - dt = np.dtype([((b'a', 'b'), int)]) - assert_raises(TypeError, dt.__getitem__, b'a') - - x = np.array([(1,), (2,), (3,)], dtype=dt) - assert_raises(IndexError, x.__getitem__, b'a') - - y = x[0] - assert_raises(IndexError, y.__getitem__, b'a') - - def test_multiple_field_name_unicode(self): - def test_assign_unicode(): - dt = np.dtype([("\u20B9", "f8"), - ("B", "f8"), - ("\u20B9", "f8")]) - - # Error raised when multiple fields have the same name(unicode included) - assert_raises(ValueError, test_assign_unicode) - - else: - def test_unicode_field_titles(self): - # Unicode field titles are added to field dict on Py2 - title = u'b' - dt = np.dtype([((title, 'a'), int)]) - dt[title] - dt['a'] - x = np.array([(1,), (2,), (3,)], dtype=dt) - x[title] - x['a'] - y = x[0] - y[title] - y['a'] - - def test_unicode_field_names(self): - # Unicode field names are converted to ascii on Python 2: - encodable_name = u'b' - assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') - assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') - - # But raises UnicodeEncodeError if it can't be encoded: - nonencodable_name = u'\uc3bc' - assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) - assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) + @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") + def test_multiple_field_name_unicode(self): + def test_assign_unicode(): + dt = np.dtype([("\u20B9", "f8"), + ("B", "f8"), + ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_assign_unicode) + + @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") + def test_unicode_field_titles(self): + # Unicode field titles are added to field dict on Py2 + title = u'b' + dt = np.dtype([((title, 'a'), int)]) + dt[title] + dt['a'] + x = np.array([(1,), (2,), (3,)], dtype=dt) + x[title] + x['a'] + y = x[0] + y[title] + y['a'] + + @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") + def test_unicode_field_names(self): + # Unicode field names are converted to ascii on Python 2: + encodable_name = u'b' + assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') + assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') + + # But raises UnicodeEncodeError if it can't be encoded: + nonencodable_name = u'\uc3bc' + 
assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) + assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + # on both Python 2 and 3: + x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name in both Python 2 and + # 3: + name = u'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x[u'b'], np.array([1, 2, 3])) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used @@ -4909,7 +4923,6 @@ class TestRecord(object): assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'), [FutureWarning]) - def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') a.flags.writeable = False @@ -6470,6 +6483,14 @@ class TestNewBufferProtocol(object): # Issue #4015. self._check_roundtrip(0) + def test_invalid_buffer_format(self): + # datetime64 cannot be used fully in a buffer yet + # Should be fixed in the next Numpy major release + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(3, dt) + assert_raises((ValueError, BufferError), memoryview, a) + assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) + def test_export_simple_1d(self): x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 13bc6b34a..5e8165bc5 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2358,7 +2358,6 @@ class TestIterNested(object): j.close() assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - def test_dtype_buffered(self): # Test nested iteration with buffering to change dtype diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 53486dc51..e7181736f 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1275,7 +1275,6 @@ class TestArrayComparisons(object): assert_equal(a == None, [False, False, False]) assert_equal(a != None, [True, True, True]) - def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) assert_(res) @@ -1530,7 +1529,7 @@ class TestClip(object): m = -0.5 M = 0.6 self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) + self.clip(ac, m, M, ac) assert_array_strict_equal(a, ac) def test_noncontig_inplace(self): @@ -1543,7 +1542,7 @@ class TestClip(object): m = -0.5 M = 0.6 self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) + self.clip(ac, m, M, ac) assert_array_equal(a, ac) def test_type_cast_01(self): @@ -1722,6 +1721,22 @@ class TestClip(object): self.clip(a, m, M, act) assert_array_strict_equal(ac, act) + def test_clip_with_out_transposed(self): + # Test that the out argument works when tranposed + a = np.arange(16).reshape(4, 4) + out = np.empty_like(a).T + a.clip(4, 10, out=out) + expected = self.clip(a, 4, 10) + assert_array_equal(out, expected) + + def test_clip_with_out_memory_overlap(self): + # Test that the out argument works when it has memory overlap + a = np.arange(16).reshape(4, 4) + ac = a.copy() + a[:-1].clip(4, 10, out=a[1:]) + expected = self.clip(ac[:-1], 4, 10) + assert_array_equal(a[1:], expected) + def test_clip_inplace_array(self): # Test native double input with array min/max a = self._generate_data(self.nr, self.nc) diff --git 
a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index f8e297736..c38625dac 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -1831,7 +1831,6 @@ class TestRegression(object): assert_equal(oct(a), oct(0)) assert_equal(hex(a), hex(0)) - def test_object_array_self_copy(self): # An object array being copied into itself DECREF'ed before INCREF'ing # causing segmentation faults (gh-3787) @@ -2371,6 +2370,13 @@ class TestRegression(object): del va assert_equal(x, b'\x00\x00\x00\x00') + def test_void_getitem(self): + # Test fix for gh-11668. + assert_(np.array([b'a'], 'V1').astype('O') == b'a') + assert_(np.array([b'ab'], 'V2').astype('O') == b'ab') + assert_(np.array([b'abc'], 'V3').astype('O') == b'abc') + assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd') + def test_structarray_title(self): # The following used to segfault on pypy, due to NPY_TITLE_KEY # not working properly and resulting to double-decref of the diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py index 6d57a5014..cb6c521e1 100644 --- a/numpy/core/tests/test_scalarbuffer.py +++ b/numpy/core/tests/test_scalarbuffer.py @@ -5,7 +5,7 @@ import sys import numpy as np import pytest -from numpy.testing import assert_, assert_equal +from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types scalars_and_codes = [ @@ -77,3 +77,28 @@ class TestScalarPEP3118(object): mv_a = memoryview(a) assert_equal(mv_x.itemsize, mv_a.itemsize) assert_equal(mv_x.format, mv_a.format) + + def test_datetime_memoryview(self): + # gh-11656 + # Values verified with v1.13.3, shape is not () as in test_scalar_dim + def as_dict(m): + return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, + ndim=m.ndim, format=m.format) + + dt1 = np.datetime64('2016-01-01') + dt2 = np.datetime64('2017-01-01') + expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, + 'shape': (8,), 'format': 'B'} + v = memoryview(dt1) + res = as_dict(v) + assert_equal(res, expected) + + v = memoryview(dt2 - dt1) + res = as_dict(v) + assert_equal(res, expected) + + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(1, dt) + # Fails to create a PEP 3118 valid buffer + assert_raises((ValueError, BufferError), memoryview, a[0]) + diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 7679a2b85..c15ce83f6 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -14,7 +14,7 @@ from numpy.testing import ( assert_, assert_equal, assert_raises, assert_raises_regex, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, + _gen_alignment_data, assert_warns ) @@ -1173,7 +1173,6 @@ class TestBitwiseUFuncs(object): assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) - def test_identity(self): assert_(np.bitwise_or.identity == 0, 'bitwise_or') assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') @@ -1340,6 +1339,10 @@ class TestMinMax(object): assert_equal(np.min(r), np.nan) assert_equal(len(sup.log), n) + def test_minimize_warns(self): + # gh 11589 + assert_warns(RuntimeWarning, np.minimum, np.nan, 1) + class TestAbsoluteNegative(object): def test_abs_neg_blocked(self): diff --git a/numpy/core/umath.py b/numpy/core/umath.py new file mode 100644 index 000000000..efa213b1a --- /dev/null +++ 
b/numpy/core/umath.py @@ -0,0 +1,32 @@ +""" +Create the numpy.core.umath namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. +""" + +from . import _multiarray_umath +from numpy.core._multiarray_umath import * +from numpy.core._multiarray_umath import _add_newdoc_ufunc, _arg + +__all__ = ['ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG', 'ERR_PRINT', + 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT', 'FPE_DIVIDEBYZERO', + 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN', 'NINF', 'NZERO', + 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID', 'SHIFT_OVERFLOW', + 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT', 'UFUNC_PYVALS_NAME', + '_add_newdoc_ufunc', '_arg', + 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', + 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', + 'ceil', 'conj', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', + 'degrees', 'divide', 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', + 'expm1', 'fabs', 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', + 'fmod', 'frexp', 'frompyfunc', 'gcd', 'geterrobj', 'greater', + 'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', + 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', + 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', + 'logical_not', 'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', + 'rint', 'seterrobj', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', + 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc'] + diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py index 1dc4f60bf..6c3a4bc75 100644 --- a/numpy/doc/broadcasting.py +++ b/numpy/doc/broadcasting.py @@ -53,9 +53,10 @@ dimensions are compatible when 2) one of them is 1 If these conditions are not met, a -``ValueError: frames are not aligned`` exception is thrown, indicating that -the arrays have incompatible shapes. The size of the resulting array -is the maximum size along each dimension of the input arrays. +``ValueError: operands could not be broadcast together`` exception is +thrown, indicating that the arrays have incompatible shapes. The size of +the resulting array is the maximum size along each dimension of the input +arrays. Arrays do not need to have the same *number* of dimensions. 
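The new numpy/core/umath.py above is a pure-Python shim over the merged C extension. A quick sanity sketch of that intent, assuming the shim is importable as shown; this is illustrative, not part of the patch:

    import numpy as np
    from numpy.core import umath, _multiarray_umath

    # the shim re-exports the very same ufunc objects
    assert umath.add is _multiarray_umath.add
    assert umath.pi == np.pi
    assert 'frompyfunc' in umath.__all__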
For example, if you have a ``256x256x3`` array of RGB values, and you want to scale @@ -124,7 +125,7 @@ An example of broadcasting in practice:: (5,) >>> x + y - <type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape + ValueError: operands could not be broadcast together with shapes (4,) (5,) >>> xx.shape (4, 1) diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py index cb8f261c1..6eff41099 100644 --- a/numpy/f2py/__main__.py +++ b/numpy/f2py/__main__.py @@ -1,27 +1,6 @@ # See http://cens.ioc.ee/projects/f2py2e/ from __future__ import division, print_function -import os -import sys -for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: - try: - i = sys.argv.index("--" + mode) - del sys.argv[i] - break - except ValueError: - pass -os.environ["NO_SCIPY_IMPORT"] = "f2py" -if mode == "g3-numpy": - sys.stderr.write("G3 f2py support is not implemented, yet.\\n") - sys.exit(1) -elif mode == "2e-numeric": - from f2py2e import main -elif mode == "2e-numarray": - sys.argv.append("-DNUMARRAY") - from f2py2e import main -elif mode == "2e-numpy": - from numpy.f2py import main -else: - sys.stderr.write("Unknown mode: " + repr(mode) + "\\n") - sys.exit(1) +from f2py2e import main + main() diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 19ce8c145..99ff030e3 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -2403,7 +2403,7 @@ def _selected_real_kind_func(p, r=0, radix=0): if p < 16: return 8 machine = platform.machine().lower() - if machine.startswith('power') or machine.startswith('ppc64'): + if machine.startswith(('aarch64', 'power', 'ppc64', 's390x')): if p <= 20: return 16 else: diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 254f99966..8750ed0b3 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -644,13 +644,25 @@ def main(): from numpy.distutils.system_info import show_all show_all() return + + # Probably outdated options that were not working before 1.16 + if '--g3-numpy' in sys.argv[1:]: + sys.stderr.write("G3 f2py support is not implemented, yet.\\n") + sys.exit(1) + elif '--2e-numeric' in sys.argv[1:]: + sys.argv.remove('--2e-numeric') + elif '--2e-numarray' in sys.argv[1:]: + # Note that this errors becaust the -DNUMARRAY argument is + # not recognized. Just here for back compatibility and the + # error message. + sys.argv.append("-DNUMARRAY") + sys.argv.remove('--2e-numarray') + elif '--2e-numpy' in sys.argv[1:]: + sys.argv.remove('--2e-numpy') + else: + pass + if '-c' in sys.argv[1:]: run_compile() else: run_main(sys.argv[1:]) - -# if __name__ == "__main__": -# main() - - -# EOF diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py index 73cb3b8bf..e95b9584f 100644 --- a/numpy/f2py/setup.py +++ b/numpy/f2py/setup.py @@ -18,8 +18,6 @@ Pearu Peterson """ from __future__ import division, print_function -__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $" - import os import sys from distutils.dep_util import newer @@ -27,60 +25,22 @@ from numpy.distutils import log from numpy.distutils.core import setup from numpy.distutils.misc_util import Configuration -from __version__ import version - - -def _get_f2py_shebang(): - """ Return shebang line for f2py script - If we are building a binary distribution format, then the shebang line - should be ``#!python`` rather than ``#!`` followed by the contents of - ``sys.executable``. 
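To go with the broadcasting documentation update above (the quoted error text changes from "frames are not aligned" to "operands could not be broadcast together"), a small reproduction; illustrative only:

    import numpy as np

    x = np.arange(4)
    y = np.arange(5)
    try:
        x + y
    except ValueError as exc:
        print(exc)
    # operands could not be broadcast together with shapes (4,) (5,)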
- """ - if set(('bdist_wheel', 'bdist_egg', 'bdist_wininst', - 'bdist_rpm')).intersection(sys.argv): - return '#!python' - return '#!' + sys.executable +from __version__ import version def configuration(parent_package='', top_path=None): config = Configuration('f2py', parent_package, top_path) - config.add_data_dir('tests') - - config.add_data_files('src/fortranobject.c', - 'src/fortranobject.h', - ) - - config.make_svn_version_py() - - def generate_f2py_py(build_dir): - f2py_exe = 'f2py' + os.path.basename(sys.executable)[6:] - if f2py_exe[-4:] == '.exe': - f2py_exe = f2py_exe[:-4] + '.py' - if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py': - f2py_exe = f2py_exe + '.py' - target = os.path.join(build_dir, f2py_exe) - if newer(__file__, target): - log.info('Creating %s', target) - f = open(target, 'w') - f.write(_get_f2py_shebang() + '\n') - mainloc = os.path.join(os.path.dirname(__file__), "__main__.py") - with open(mainloc) as mf: - f.write(mf.read()) - f.close() - return target - - config.add_scripts(generate_f2py_py) - - log.info('F2PY Version %s', config.get_version()) - + config.add_data_files( + 'src/fortranobject.c', + 'src/fortranobject.h') return config + if __name__ == "__main__": config = configuration(top_path='') - print('F2PY Version', version) config = config.todict() config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index dc40ac67b..c1757150e 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -26,7 +26,7 @@ from .financial import * from .arrayterator import Arrayterator from .arraypad import * from ._version import * -from numpy.core.multiarray import tracemalloc_domain +from numpy.core._multiarray_umath import tracemalloc_domain __all__ = ['emath', 'math', 'tracemalloc_domain'] __all__ += type_check.__all__ diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 6f1295f09..ab00b1444 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -37,6 +37,7 @@ from __future__ import division, absolute_import, print_function import os import sys +import warnings import shutil import io @@ -85,9 +86,10 @@ def _python2_bz2open(fn, mode, encoding, newline): if "t" in mode: # BZ2File is missing necessary functions for TextIOWrapper - raise ValueError("bz2 text files not supported in python2") - else: - return bz2.BZ2File(fn, mode) + warnings.warn("Assuming latin1 encoding for bz2 text file in Python2", + RuntimeWarning, stacklevel=5) + mode = mode.replace("t", "") + return bz2.BZ2File(fn, mode) def _python2_gzipopen(fn, mode, encoding, newline): """ Wrapper to open gzip in text mode. 
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index 5880ea154..62e9b6d50 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -82,6 +82,11 @@ def ediff1d(ary, to_end=None, to_begin=None): # force a 1d array ary = np.asanyarray(ary).ravel() + # we have unit tests enforcing + # propagation of the dtype of input + # ary to returned result + dtype_req = ary.dtype + # fast track default case if to_begin is None and to_end is None: return ary[1:] - ary[:-1] @@ -89,13 +94,23 @@ def ediff1d(ary, to_end=None, to_begin=None): if to_begin is None: l_begin = 0 else: - to_begin = np.asanyarray(to_begin).ravel() + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req): + raise TypeError("dtype of to_begin must be compatible " + "with input ary") + + to_begin = to_begin.ravel() l_begin = len(to_begin) if to_end is None: l_end = 0 else: - to_end = np.asanyarray(to_end).ravel() + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req): + raise TypeError("dtype of to_end must be compatible " + "with input ary") + + to_end = to_end.ravel() l_end = len(to_end) # do the calculation in place and copy to_begin and to_end @@ -312,12 +327,12 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. return_indices : bool - If True, the indices which correspond to the intersection of the - two arrays are returned. The first instance of a value is used - if there are multiple. Default is False. - - .. versionadded:: 1.15.0 - + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + .. versionadded:: 1.15.0 + Returns ------- intersect1d : ndarray @@ -326,7 +341,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): The indices of the first occurrences of the common values in `ar1`. Only provided if `return_indices` is True. comm2 : ndarray - The indices of the first occurrences of the common values in `ar2`. + The indices of the first occurrences of the common values in `ar2`. Only provided if `return_indices` is True. @@ -345,7 +360,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): >>> from functools import reduce >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([3]) - + To return the indices of the values common to the input arrays along with the intersected values: >>> x = np.array([1, 1, 2, 3, 4]) @@ -355,8 +370,11 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): (array([0, 2, 4]), array([1, 0, 2])) >>> xy, x[x_ind], y[y_ind] (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) - + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + if not assume_unique: if return_indices: ar1, ind1 = unique(ar1, return_index=True) @@ -367,7 +385,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): else: ar1 = ar1.ravel() ar2 = ar2.ravel() - + aux = np.concatenate((ar1, ar2)) if return_indices: aux_sort_indices = np.argsort(aux, kind='mergesort') @@ -389,6 +407,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): else: return int1d + def setxor1d(ar1, ar2, assume_unique=False): """ Find the set exclusive-or of two arrays. 
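The ediff1d change above rejects to_begin/to_end values that cannot be cast to the dtype of the input array. A minimal sketch of both sides of that contract, with values taken from the tests added later in this diff:

    import numpy as np

    a = np.array([1, 2, 3], dtype=np.int64)

    # compatible scalars are still accepted
    print(np.ediff1d(a, to_begin=3, to_end=-9))    # [ 3  1  1 -9]

    # values that cannot be safely cast to int64 now raise
    try:
        np.ediff1d(a, to_end=np.nan)
    except TypeError as exc:
        print(exc)            # dtype of to_end must be compatible with input ary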
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 75a39beaa..2992e92bb 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -305,12 +305,17 @@ def average(a, axis=None, weights=None, returned=False): Returns ------- - average, [sum_of_weights] : array_type or double - Return the average along the specified axis. When returned is `True`, + retval, [sum_of_weights] : array_type or double + Return the average along the specified axis. When `returned` is `True`, return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is `Float` - if `a` is of integer type, otherwise it is of the same type as `a`. - `sum_of_weights` is of the same type as `average`. + of the weights as the second element. `sum_of_weights` is of the + same type as `retval`. The result dtype follows a genereal pattern. + If `weights` is None, the result dtype will be that of `a` , or ``float64`` + if `a` is integral. Otherwise, if `weights` is not None and `a` is non- + integral, the result type will be the type of lowest precision capable of + representing values of both `a` and `weights`. If `a` happens to be + integral, the previous rules still applies but the result dtype will + at least be ``float64``. Raises ------ @@ -327,6 +332,8 @@ def average(a, axis=None, weights=None, returned=False): ma.average : average for masked arrays -- useful if your data contains "missing" values + numpy.result_type : Returns the type that results from applying the + numpy type promotion rules to the arguments. Examples -------- @@ -346,10 +353,16 @@ def average(a, axis=None, weights=None, returned=False): >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) + Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. - + + >>> a = np.ones(5, dtype=np.float128) + >>> w = np.ones(5, dtype=np.complex64) + >>> avg = np.average(a, weights=w) + >>> print(avg.dtype) + complex256 """ a = np.asanyarray(a) @@ -1769,8 +1782,8 @@ class vectorize(object): Generalized function class. Define a vectorized function which takes a nested sequence of objects or - numpy arrays as inputs and returns an single or tuple of numpy array as - output. The vectorized function evaluates `pyfunc` over successive tuples + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. 
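The expanded average docstring above spells out the result-dtype rules in terms of numpy.result_type. A brief illustration, using ordinary single-precision types rather than the extended types of the docstring example:

    import numpy as np

    # integer input, no weights: float64 result
    print(np.average(np.arange(5)).dtype)            # float64

    # non-integral input with weights: promotion follows result_type
    a = np.ones(5, dtype=np.float32)
    w = np.ones(5, dtype=np.complex64)
    print(np.average(a, weights=w).dtype)            # complex64
    print(np.result_type(a, w))                      # complex64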
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 422b356f7..f03f30fb0 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -260,6 +260,32 @@ def _get_outer_edges(a, range): return first_edge, last_edge +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned + return np.subtract(a, b, casting='unsafe', dtype=dt) + + def _get_bin_edges(a, bins, range, weights): """ Computes the bins used internally by `histogram`. @@ -311,7 +337,7 @@ def _get_bin_edges(a, bins, range, weights): # Do not call selectors on empty arrays width = _hist_bin_selectors[bin_name](a) if width: - n_equal_bins = int(np.ceil((last_edge - first_edge) / width)) + n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. @@ -703,7 +729,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, n = np.zeros(n_equal_bins, ntype) # Pre-compute histogram scaling factor - norm = n_equal_bins / (last_edge - first_edge) + norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it @@ -731,7 +757,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, # Compute the bin indices, and for values that lie exactly on # last_edge we need to subtract one - f_indices = (tmp_a - first_edge) * norm + f_indices = _unsigned_subtract(tmp_a, first_edge) * norm indices = f_indices.astype(np.intp) indices[indices == n_equal_bins] -= 1 diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index abd2da1a2..8d6b0f139 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -1178,13 +1178,15 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False, This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: - * linear: ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * lower: ``i``. - * higher: ``j``. - * nearest: ``i`` or ``j``, whichever is nearest. - * midpoint: ``(i + j) / 2``. + + * linear: ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * lower: ``i``. + * higher: ``j``. + * nearest: ``i`` or ``j``, whichever is nearest. + * midpoint: ``(i + j) / 2``. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 0e691f56e..9f3b84732 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -396,7 +396,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. 
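The _unsigned_subtract helper above lets histogram compute the range of signed-integer data without overflowing the signed type; do_signed_overflow_bounds, added later in this diff, exercises it. A user-level sketch with int8 data near the type's limits:

    import numpy as np

    arr = np.array([-124, 124], dtype=np.int8)   # peak-to-peak overflows int8
    hist, edges = np.histogram(arr, bins=2)
    print(hist)     # [1 1]
    print(edges)    # [-124.    0.  124.]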
Returns a vector of coefficients `p` that minimises - the squared error. + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. Parameters ---------- diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index d31d8a939..66f534734 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -688,7 +688,7 @@ def array_split(ary, indices_or_sections, axis=0): except AttributeError: Ntotal = len(ary) try: - # handle scalar case. + # handle array case. Nsections = len(indices_or_sections) + 1 div_points = [0] + list(indices_or_sections) + [Ntotal] except TypeError: @@ -700,7 +700,7 @@ def array_split(ary, indices_or_sections, axis=0): section_sizes = ([0] + extras * [Neach_section+1] + (Nsections-extras) * [Neach_section]) - div_points = _nx.array(section_sizes).cumsum() + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() sub_arys = [] sary = _nx.swapaxes(ary, axis, 0) diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index bc5993802..ca13738c1 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -242,7 +242,7 @@ def broadcast_arrays(*args, **kwargs): subok = kwargs.pop('subok', False) if kwargs: raise TypeError('broadcast_arrays() got an unexpected keyword ' - 'argument {!r}'.format(kwargs.keys()[0])) + 'argument {!r}'.format(list(kwargs.keys())[0])) args = [np.array(_m, copy=False, subok=subok) for _m in args] shape = _broadcast_shape(*args) diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 32812990c..85788941c 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -2,11 +2,14 @@ from __future__ import division, absolute_import, print_function import os import sys +import pytest from tempfile import mkdtemp, mkstemp, NamedTemporaryFile from shutil import rmtree -from numpy.testing import assert_, assert_equal, assert_raises, SkipTest import numpy.lib._datasource as datasource +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns, SkipTest + ) if sys.version_info[0] >= 3: import urllib.request as urllib_request @@ -30,14 +33,14 @@ def urlopen_stub(url, data=None): old_urlopen = None -def setup(): +def setup_module(): global old_urlopen old_urlopen = urllib_request.urlopen urllib_request.urlopen = urlopen_stub -def teardown(): +def teardown_module(): urllib_request.urlopen = old_urlopen # A valid website for more robust testing @@ -161,6 +164,24 @@ class TestDataSourceOpen(object): fp.close() assert_equal(magic_line, result) + @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only") + def test_Bz2File_text_mode_warning(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + raise SkipTest + # Test datasource's internal file_opener for BZip2 files. 
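The stride_tricks fix above makes the unexpected-keyword error reachable on Python 3, where kwargs.keys() is not indexable. A short reproduction, mirroring the test_broadcast_kwargs test added further down; illustrative only:

    import numpy as np

    x = np.arange(10)
    y = np.arange(10)
    try:
        np.broadcast_arrays(x, y, dtype='float64')
    except TypeError as exc:
        print(exc)
    # broadcast_arrays() got an unexpected keyword argument 'dtype'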
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + with assert_warns(RuntimeWarning): + fp = self.ds.open(filepath, 'rt') + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + class TestDataSourceExists(object): def setup(self): diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index dace5ade8..4b61726d2 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -6,10 +6,13 @@ from __future__ import division, absolute_import, print_function import numpy as np import sys -from numpy.testing import assert_array_equal, assert_equal, assert_raises +from numpy.testing import (assert_array_equal, assert_equal, + assert_raises, assert_raises_regex) from numpy.lib.arraysetops import ( ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin ) +import pytest + class TestSetOps(object): @@ -30,19 +33,30 @@ class TestSetOps(object): ed = np.array([1, 2, 5]) c = intersect1d(a, b) assert_array_equal(c, ed) - assert_array_equal([], intersect1d([], [])) - + + def test_intersect1d_array_like(self): + # See gh-11772 + class Test(object): + def __array__(self): + return np.arange(3) + + a = Test() + res = intersect1d(a, a) + assert_array_equal(res, a) + res = intersect1d([1, 2, 3], [1, 2, 3]) + assert_array_equal(res, [1, 2, 3]) + def test_intersect1d_indices(self): # unique inputs - a = np.array([1, 2, 3, 4]) + a = np.array([1, 2, 3, 4]) b = np.array([2, 1, 4, 6]) c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) ee = np.array([1, 2, 4]) assert_array_equal(c, ee) assert_array_equal(a[i1], ee) assert_array_equal(b[i2], ee) - + # non-unique inputs a = np.array([1, 2, 2, 3, 4, 3, 2]) b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) @@ -51,7 +65,7 @@ class TestSetOps(object): assert_array_equal(c, ef) assert_array_equal(a[i1], ef) assert_array_equal(b[i2], ef) - + # non1d, unique inputs a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) @@ -61,7 +75,7 @@ class TestSetOps(object): ea = np.array([2, 6, 7, 8]) assert_array_equal(ea, a[ui1]) assert_array_equal(ea, b[ui2]) - + # non1d, not assumed to be uniqueinputs a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) @@ -71,7 +85,7 @@ class TestSetOps(object): ea = np.array([2, 7, 8]) assert_array_equal(ea, a[ui1]) assert_array_equal(ea, b[ui2]) - + def test_setxor1d(self): a = np.array([5, 7, 1, 2]) b = np.array([2, 4, 3, 1, 5]) @@ -114,6 +128,68 @@ class TestSetOps(object): assert_array_equal([7,1], ediff1d(two_elem, to_begin=7)) assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6])) + @pytest.mark.parametrize("ary, prepend, append", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan), + # should fail because attempting + # to downcast to smaller int type: + (np.array([1, 2, 3], dtype=np.int32), + np.array([5, 7, 2], dtype=np.int64), + None), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary): + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = 'must be 
compatible' + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize("ary," + "prepend," + "append," + "expected", [ + (np.array([1, 2, 3], dtype=np.int16), + 0, + None, + np.array([0, 1, 1], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ]) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + + def test_isin(self): # the tests for in1d cover most of isin's behavior # if in1d is removed, would need to change those tests to test diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index f136b5c81..561f5f938 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -310,6 +310,20 @@ class TestHistogram(object): assert_equal(d_edge.dtype, dates.dtype) assert_equal(t_edge.dtype, td) + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + def do_precision_lower_bound(self, float_small, float_large): eps = np.finfo(float_large).eps diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index 315251daa..7e9c026e4 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -113,7 +113,6 @@ class TestRavelUnravelIndex(object): assert_(x.flags.writeable) assert_(y.flags.writeable) - def test_0d(self): # gh-580 x = np.unravel_index(0, ()) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index f58c9e33d..1f3664d92 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -348,7 +348,6 @@ class TestSaveTxt(object): assert_raises(ValueError, np.savetxt, c, np.array(1)) assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) - def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) c = BytesIO() @@ -2025,7 +2024,6 @@ M 33 21.99 assert_equal(test['f0'], 0) assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) - def test_utf8_file_nodtype_unicode(self): # bytes encoding with non-latin1 -> unicode upcast utf8 = u'\u03d6' diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 6d24dd624..6e4cd225d 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -3,6 +3,8 @@ from __future__ import division, absolute_import, print_function import numpy as np import warnings import functools +import sys +import pytest from numpy.lib.shape_base import ( apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, @@ -14,6 +16,9 @@ from numpy.testing import ( ) +IS_64BIT = sys.maxsize > 2**32 + + def _add_keepdims(func): """ hack in keepdims behavior into a function taking 
an axis """ @functools.wraps(func) @@ -403,6 +408,15 @@ class TestArraySplit(object): assert_(a.dtype.type is res[-1].dtype.type) # perhaps should check higher dimensions + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_integer_split_2D_rows_greater_max_int32(self): + a = np.broadcast_to([0], (1 << 32, 2)) + res = array_split(a, 4) + chunk = np.broadcast_to([0], (1 << 30, 2)) + tgt = [chunk] * 4 + for i in range(len(tgt)): + assert_equal(res[i].shape, tgt[i].shape) + def test_index_split_simple(self): a = np.arange(10) indices = [1, 5, 7] diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index 3c2ca8b87..b2bd7da3e 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -3,7 +3,8 @@ from __future__ import division, absolute_import, print_function import numpy as np from numpy.core._rational_tests import rational from numpy.testing import ( - assert_equal, assert_array_equal, assert_raises, assert_ + assert_equal, assert_array_equal, assert_raises, assert_, + assert_raises_regex ) from numpy.lib.stride_tricks import ( as_strided, broadcast_arrays, _broadcast_shape, broadcast_to @@ -57,6 +58,17 @@ def test_same(): assert_array_equal(x, bx) assert_array_equal(y, by) +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, + r'broadcast_arrays\(\) got an unexpected keyword*'): + broadcast_arrays(x, y, dtype='float64') + def test_one_off(): x = np.array([[1, 2, 3]]) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index c3b76ada7..ccc437663 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -133,11 +133,6 @@ def _linalgRealType(t): """Cast the type t to either double or cdouble.""" return double -_complex_types_map = {single : csingle, - double : cdouble, - csingle : csingle, - cdouble : cdouble} - def _commonType(*arrays): # in lite version, use higher precision (always double or cdouble) result_type = single @@ -542,6 +537,8 @@ def matrix_power(a, n): of the same shape as M is returned. If ``n < 0``, the inverse is computed and then raised to the ``abs(n)``. + .. note:: Stacks of object matrices are not currently supported. + Parameters ---------- a : (..., M, M) array_like @@ -604,6 +601,16 @@ def matrix_power(a, n): except TypeError: raise TypeError("exponent must be an integer") + # Fall back on dot for object arrays. Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + if n == 0: a = empty_like(a) a[...] = eye(a.shape[-2], dtype=a.dtype) @@ -618,20 +625,20 @@ def matrix_power(a, n): return a elif n == 2: - return matmul(a, a) + return fmatmul(a, a) elif n == 3: - return matmul(matmul(a, a), a) + return fmatmul(fmatmul(a, a), a) # Use binary decomposition to reduce the number of matrix multiplications. # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to # increasing powers of 2, and multiply into the result as needed. 
z = result = None while n > 0: - z = a if z is None else matmul(z, z) + z = a if z is None else fmatmul(z, z) n, bit = divmod(n, 2) if bit: - result = z if result is None else matmul(result, z) + result = z if result is None else fmatmul(result, z) return result @@ -2115,7 +2122,6 @@ def lstsq(a, b, rcond="warn"): if is_1d: b = b[:, newaxis] _assertRank2(a, b) - _assertNoEmpty2d(a, b) # TODO: relax this constraint m, n = a.shape[-2:] m2, n_rhs = b.shape[-2:] if m != m2: @@ -2146,7 +2152,16 @@ def lstsq(a, b, rcond="warn"): signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq) + if n_rhs == 0: + # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis + b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype) x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj) + if m == 0: + x[...] = 0 + if n_rhs == 0: + # remove the item we added + x = x[..., :n_rhs] + resids = resids[..., :n_rhs] # remove the axis we added if is_1d: diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 0df673884..98a77d8f5 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -875,14 +875,12 @@ class TestDet(DetCases): class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): def do(self, a, b, tags): - if 'size-0' in tags: - assert_raises(LinAlgError, linalg.lstsq, a, b) - return - arr = np.asarray(a) m, n = arr.shape u, s, vt = linalg.svd(a, 0) x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) if m <= n: assert_almost_equal(b, dot(a, x)) assert_equal(rank, m) @@ -923,78 +921,117 @@ class TestLstsq(LstsqCases): # Warning should be raised exactly once (first command) assert_(len(w) == 1) - + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) class TestMatrixPower(object): - R90 = array([[0, 1], [-1, 0]]) - Arb22 = array([[4, -7], [-2, 10]]) - noninv = array([[1, 0], [0, 0]]) - arbfloat = array([[[0.1, 3.2], [1.2, 0.7]], - [[0.2, 6.4], [2.4, 1.4]]]) - large = identity(10) - t = large[1, :].copy() - large[1, :] = large[0, :] - large[0, :] = t + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]]*2) + #FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] - def test_large_power(self): + def test_large_power(self, dt): + power = matrix_power + rshft = self.rshft_1.astype(dt) assert_equal( - matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90) + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) assert_equal( - matrix_power(self.R90, 2 ** 100 
+ 2 ** 10 + 1), self.R90) + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) assert_equal( - matrix_power(self.R90, 2 ** 100 + 2 + 1), -self.R90) - - def test_large_power_trailing_zero(self): + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) assert_equal( - matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5), identity(2)) + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) - def testip_zero(self): + def test_power_is_zero(self, dt): def tz(M): mz = matrix_power(M, 0) assert_equal(mz, identity_like_generalized(M)) assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - tz(M) - - def testip_one(self): - def tz(M): - mz = matrix_power(M, 1) - assert_equal(mz, M) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - tz(M) - - def testip_two(self): - def tz(M): - mz = matrix_power(M, 2) - assert_equal(mz, matmul(M, M)) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - tz(M) - - def testip_invert(self): - def tz(M): - mz = matrix_power(M, -1) - assert_almost_equal(matmul(mz, M), identity_like_generalized(M)) - for M in [self.R90, self.Arb22, self.arbfloat, self.large]: - tz(M) - - def test_invert_noninvertible(self): - assert_raises(LinAlgError, matrix_power, self.noninv, -1) - - def test_invalid(self): - assert_raises(TypeError, matrix_power, self.R90, 1.5) - assert_raises(TypeError, matrix_power, self.R90, [1]) - assert_raises(LinAlgError, matrix_power, np.array([1]), 1) - assert_raises(LinAlgError, matrix_power, np.array([[1], [2]]), 1) - assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2)), 1) - - -class TestBoolPower(object): + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) - def test_square(self): - A = array([[True, False], [True, True]]) - assert_equal(matrix_power(A, 2), A) class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 74edeb274..65ce967ae 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -215,7 
+215,7 @@ def _recursive_fill_value(dtype, f): """ Recursively produce a fill value for `dtype`, calling f on scalar dtypes """ - if dtype.names: + if dtype.names is not None: vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names) return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: @@ -433,7 +433,7 @@ def _recursive_set_fill_value(fillvalue, dt): if cdtype.subdtype: cdtype = cdtype.subdtype[0] - if cdtype.names: + if cdtype.names is not None: output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) else: output_value.append(np.array(fval, dtype=cdtype).item()) @@ -1282,7 +1282,7 @@ def _replace_dtype_fields_recursive(dtype, primitive_dtype): _recurse = _replace_dtype_fields_recursive # Do we have some name fields ? - if dtype.names: + if dtype.names is not None: descr = [] for name in dtype.names: field = dtype.fields[name] @@ -1547,7 +1547,7 @@ def _shrink_mask(m): """ Shrink a mask to nomask if possible """ - if not m.dtype.names and not m.any(): + if m.dtype.names is None and not m.any(): return nomask else: return m @@ -1733,7 +1733,7 @@ def mask_or(m1, m2, copy=False, shrink=True): names = m1.dtype.names for name in names: current1 = m1[name] - if current1.dtype.names: + if current1.dtype.names is not None: _recursive_mask_or(current1, m2[name], newmask[name]) else: umath.logical_or(current1, m2[name], newmask[name]) @@ -1750,7 +1750,7 @@ def mask_or(m1, m2, copy=False, shrink=True): (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) if (dtype1 != dtype2): raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) - if dtype1.names: + if dtype1.names is not None: # Allocate an output mask array with the properly broadcast shape. newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) _recursive_mask_or(m1, m2, newmask) @@ -1793,7 +1793,7 @@ def flatten_mask(mask): def _flatmask(mask): "Flatten the mask and returns a (maybe nested) sequence of booleans." mnames = mask.dtype.names - if mnames: + if mnames is not None: return [flatten_mask(mask[name]) for name in mnames] else: return mask @@ -2431,7 +2431,7 @@ def _recursive_printoption(result, mask, printopt): """ names = result.dtype.names - if names: + if names is not None: for name in names: curdata = result[name] curmask = mask[name] @@ -2483,7 +2483,7 @@ def _recursive_filled(a, mask, fill_value): names = a.dtype.names for name in names: current = a[name] - if current.dtype.names: + if current.dtype.names is not None: _recursive_filled(current, mask[name], fill_value[name]) else: np.copyto(current, fill_value[name], where=mask[name]) @@ -2870,12 +2870,12 @@ class MaskedArray(ndarray): _data._mask = mask _data._sharedmask = not copy else: - if _data.dtype.names: + if _data.dtype.names is not None: def _recursive_or(a, b): "do a|=b on each field of a, recursively" for name in a.dtype.names: (af, bf) = (a[name], b[name]) - if af.dtype.names: + if af.dtype.names is not None: _recursive_or(af, bf) else: af |= bf @@ -2962,7 +2962,7 @@ class MaskedArray(ndarray): if isinstance(obj, ndarray): # XX: This looks like a bug -- shouldn't it check self.dtype # instead? 
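These ma/core.py edits all replace a truth test on `dtype.names` with an explicit `is not None` check. The difference matters for void dtypes with zero fields, whose `names` is an empty tuple: falsy, yet still structured (see the `test_fieldless_void` test added further down). A small interpreter illustration, not part of the patch:

    import numpy as np

    print(np.dtype(float).names)         # None   -> not structured
    print(np.dtype([('x', int)]).names)  # ('x',) -> structured
    print(np.dtype([]).names)            # ()     -> structured but falsy,
                                         #           so `if dtype.names:` skips it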
- if obj.dtype.names: + if obj.dtype.names is not None: _mask = getmaskarray(obj) else: _mask = getmask(obj) @@ -3011,7 +3011,7 @@ class MaskedArray(ndarray): # When _mask.shape is not writable (because it's a void) pass # Finalize the fill_value for structured arrays - if self.dtype.names: + if self.dtype.names is not None: if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) return @@ -3295,15 +3295,14 @@ class MaskedArray(ndarray): return _dtype = _data.dtype - nbfields = len(_dtype.names or ()) if value is masked: # The mask wasn't set: create a full version. if _mask is nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) # Now, set the mask to its value. - if nbfields: - _mask[indx] = tuple([True] * nbfields) + if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) else: _mask[indx] = True return @@ -3312,8 +3311,8 @@ class MaskedArray(ndarray): dval = getattr(value, '_data', value) # Get the _mask part of the new value mval = getmask(value) - if nbfields and mval is nomask: - mval = tuple([False] * nbfields) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) if _mask is nomask: # Set the data, then the mask _data[indx] = dval @@ -3328,7 +3327,7 @@ class MaskedArray(ndarray): indx = indx * umath.logical_not(_mask) _data[indx] = dval else: - if nbfields: + if _dtype.names is not None: err_msg = "Flexible 'hard' masks are not yet supported." raise NotImplementedError(err_msg) mindx = mask_or(_mask[indx], mval, copy=True) @@ -3709,7 +3708,7 @@ class MaskedArray(ndarray): if self is masked_singleton: return np.asanyarray(fill_value) - if m.dtype.names: + if m.dtype.names is not None: result = self._data.copy('K') _recursive_filled(result, self._mask, fill_value) elif not m.any(): @@ -3979,7 +3978,7 @@ class MaskedArray(ndarray): mask = mask_or(smask, omask, copy=True) odata = getdata(other) - if mask.dtype.names: + if mask.dtype.names is not None: # For possibly masked structured arrays we need to be careful, # since the standard structured array comparison will use all # fields, masked or not. To avoid masked fields influencing the diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 67a9186a8..a08a0d956 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -514,8 +514,6 @@ class TestMaskedArray(object): fill_value=999999)''') ) - - def test_str_repr_legacy(self): oldopts = np.get_printoptions() np.set_printoptions(legacy='1.13') @@ -788,7 +786,6 @@ class TestMaskedArray(object): control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" assert_equal(str(t_2d0), control) - def test_flatten_structured_array(self): # Test flatten_structured_array on arrays # On ndarray @@ -3174,18 +3171,13 @@ class TestMaskedArrayMethods(object): assert_equal(test.mask, mask_first.mask) # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianess and `endwith`. 
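One user-visible consequence of the `__setitem__` changes above: assigning `masked` to a record of a structured masked array sets the mask for every field at that position (the tuple-of-True path). A short illustration with throwaway values:

    import numpy as np

    a = np.ma.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
    a[0] = np.ma.masked          # masks all fields of record 0
    print(a.mask[0])             # (True, True)
    print(a.mask[1])             # (False, False)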
dt = np.dtype([('v', int, 2)]) a = a.view(dt) - mask_last = mask_last.view(dt) - mask_first = mask_first.view(dt) - test = sort(a) - assert_equal(test, mask_last) - assert_equal(test.mask, mask_last.mask) - test = sort(a, endwith=False) - assert_equal(test, mask_first) - assert_equal(test.mask, mask_first.mask) def test_argsort(self): # Test argsort @@ -5022,3 +5014,18 @@ def test_astype(): x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) assert_(x_f2.flags.f_contiguous) assert_(x_f2.mask.flags.f_contiguous) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 78392d2a2..dc72e7661 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -9,7 +9,7 @@ abc module from the stdlib, hence it is only available for Python >= 2.6. from __future__ import division, absolute_import, print_function from abc import ABCMeta, abstractmethod, abstractproperty -from numbers import Number +import numbers import numpy as np from . import polyutils as pu @@ -17,7 +17,7 @@ from . import polyutils as pu __all__ = ['ABCPolyBase'] class ABCPolyBase(object): - """An abstract base class for series classes. + """An abstract base class for immutable series classes. ABCPolyBase provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the @@ -82,6 +82,10 @@ class ABCPolyBase(object): def nickname(self): pass + @abstractproperty + def basis_name(self): + pass + @abstractmethod def _add(self): pass @@ -273,6 +277,89 @@ class ABCPolyBase(object): name = self.nickname return format % (name, coef) + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return "{{{basis}}}_{{{i}}}({arg_str})".format( + basis=cls.basis_name, i=i, arg_str=arg_str + ) + + @staticmethod + def _repr_latex_scalar(x): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return r'\text{{{}}}'.format(x) + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + if off == 0 and scale == 1: + term = 'x' + needs_parens = False + elif scale == 1: + term = '{} + x'.format( + self._repr_latex_scalar(off) + ) + needs_parens = True + elif off == 0: + term = '{}x'.format( + self._repr_latex_scalar(scale) + ) + needs_parens = True + else: + term = '{} + {}x'.format( + self._repr_latex_scalar(off), + self._repr_latex_scalar(scale) + ) + needs_parens = True + + # filter out uninteresting coefficients + filtered_coeffs = [ + (i, c) + for i, c in enumerate(self.coef) + # if not (c == 0) # handle NaN + ] + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = '{}'.format(self._repr_latex_scalar(c)) + elif not isinstance(c, numbers.Real): + coef_str = ' + ({})'.format(self._repr_latex_scalar(c)) + elif not 
np.signbit(c): + coef_str = ' + {}'.format(self._repr_latex_scalar(c)) + else: + coef_str = ' - {}'.format(self._repr_latex_scalar(-c)) + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = r'{}\,{}'.format(coef_str, term_str) + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients at all + body = '0' + + return r'$x \mapsto {}$'.format(body) + + + # Pickle and copy def __getstate__(self): @@ -338,7 +425,7 @@ class ABCPolyBase(object): # there is no true divide if the rhs is not a Number, although it # could return the first n elements of an infinite series. # It is hard to see where n would come from, though. - if not isinstance(other, Number) or isinstance(other, bool): + if not isinstance(other, numbers.Number) or isinstance(other, bool): form = "unsupported types for true division: '%s', '%s'" raise TypeError(form % (type(self), type(other))) return self.__floordiv__(other) @@ -425,9 +512,6 @@ class ABCPolyBase(object): rem = self.__class__(rem, self.domain, self.window) return quo, rem - # Enhance me - # some augmented arithmetic operations could be added here - def __eq__(self, other): res = (isinstance(other, self.__class__) and np.all(self.domain == other.domain) and @@ -773,7 +857,9 @@ class ABCPolyBase(object): ------- new_series : series A series that represents the least squares fit to the data and - has the domain specified in the call. + has the domain and window specified in the call. If the + coefficients for the unscaled and unshifted basis polynomials are + of interest, do ``new_series.convert().coef``. [resid, rank, sv, rcond] : list These values are only returned if `full` = True diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 310c711ef..f14ed988d 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -21,9 +21,10 @@ Arithmetic ---------- - `chebadd` -- add two Chebyshev series. - `chebsub` -- subtract one Chebyshev series from another. +- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``. - `chebmul` -- multiply two Chebyshev series. - `chebdiv` -- divide one Chebyshev series by another. -- `chebpow` -- raise a Chebyshev series to an positive integer power +- `chebpow` -- raise a Chebyshev series to a positive integer power. - `chebval` -- evaluate a Chebyshev series at given points. - `chebval2d` -- evaluate a 2D Chebyshev series at given points. - `chebval3d` -- evaluate a 3D Chebyshev series at given points. @@ -579,7 +580,7 @@ def chebadd(c1, c2): See Also -------- - chebsub, chebmul, chebdiv, chebpow + chebsub, chebmulx, chebmul, chebdiv, chebpow Notes ----- @@ -629,7 +630,7 @@ def chebsub(c1, c2): See Also -------- - chebadd, chebmul, chebdiv, chebpow + chebadd, chebmulx, chebmul, chebdiv, chebpow Notes ----- @@ -684,6 +685,12 @@ def chebmulx(c): .. 
versionadded:: 1.5.0 + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([ 1., 2.5, 3., 1.5, 2.]) + """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -722,7 +729,7 @@ def chebmul(c1, c2): See Also -------- - chebadd, chebsub, chebdiv, chebpow + chebadd, chebsub, chebmulx, chebdiv, chebpow Notes ----- @@ -773,7 +780,7 @@ def chebdiv(c1, c2): See Also -------- - chebadd, chebsub, chebmul, chebpow + chebadd, chebsub, chemulx, chebmul, chebpow Notes ----- @@ -841,10 +848,13 @@ def chebpow(c, pow, maxpower=16): See Also -------- - chebadd, chebsub, chebmul, chebdiv + chebadd, chebsub, chebmulx, chebmul, chebdiv Examples -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , 14. , 12.5, 12. , 8. ]) """ # c is a trimmed copy @@ -2188,3 +2198,4 @@ class Chebyshev(ABCPolyBase): nickname = 'cheb' domain = np.array(chebdomain) window = np.array(chebdomain) + basis_name = 'T' diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 75c7e6832..2aed4b34f 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -16,11 +16,12 @@ Constants Arithmetic ---------- -- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. - `hermadd` -- add two Hermite series. - `hermsub` -- subtract one Hermite series from another. +- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. - `hermmul` -- multiply two Hermite series. - `hermdiv` -- divide one Hermite series by another. +- `hermpow` -- raise a Hermite series to a positive integer power. - `hermval` -- evaluate a Hermite series at given points. - `hermval2d` -- evaluate a 2D Hermite series at given points. - `hermval3d` -- evaluate a 3D Hermite series at given points. @@ -323,7 +324,7 @@ def hermadd(c1, c2): See Also -------- - hermsub, hermmul, hermdiv, hermpow + hermsub, hermmulx, hermmul, hermdiv, hermpow Notes ----- @@ -371,7 +372,7 @@ def hermsub(c1, c2): See Also -------- - hermadd, hermmul, hermdiv, hermpow + hermadd, hermmulx, hermmul, hermdiv, hermpow Notes ----- @@ -417,6 +418,10 @@ def hermmulx(c): out : ndarray Array representing the result of the multiplication. + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + Notes ----- The multiplication uses the recursion relationship for Hermite @@ -469,7 +474,7 @@ def hermmul(c1, c2): See Also -------- - hermadd, hermsub, hermdiv, hermpow + hermadd, hermsub, hermmulx, hermdiv, hermpow Notes ----- @@ -537,7 +542,7 @@ def hermdiv(c1, c2): See Also -------- - hermadd, hermsub, hermmul, hermpow + hermadd, hermsub, hermmulx, hermmul, hermpow Notes ----- @@ -606,7 +611,7 @@ def hermpow(c, pow, maxpower=16): See Also -------- - hermadd, hermsub, hermmul, hermdiv + hermadd, hermsub, hermmulx, hermmul, hermdiv Examples -------- @@ -1851,3 +1856,4 @@ class Hermite(ABCPolyBase): nickname = 'herm' domain = np.array(hermdomain) window = np.array(hermdomain) + basis_name = 'H' diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 125364a11..d4520ad6c 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -16,11 +16,12 @@ Constants Arithmetic ---------- -- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. - `hermeadd` -- add two Hermite_e series. - `hermesub` -- subtract one Hermite_e series from another. +- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. - `hermemul` -- multiply two Hermite_e series. 
- `hermediv` -- divide one Hermite_e series by another. +- `hermepow` -- raise a Hermite_e series to a positive integer power. - `hermeval` -- evaluate a Hermite_e series at given points. - `hermeval2d` -- evaluate a 2D Hermite_e series at given points. - `hermeval3d` -- evaluate a 3D Hermite_e series at given points. @@ -324,7 +325,7 @@ def hermeadd(c1, c2): See Also -------- - hermesub, hermemul, hermediv, hermepow + hermesub, hermemulx, hermemul, hermediv, hermepow Notes ----- @@ -372,7 +373,7 @@ def hermesub(c1, c2): See Also -------- - hermeadd, hermemul, hermediv, hermepow + hermeadd, hermemulx, hermemul, hermediv, hermepow Notes ----- @@ -470,7 +471,7 @@ def hermemul(c1, c2): See Also -------- - hermeadd, hermesub, hermediv, hermepow + hermeadd, hermesub, hermemulx, hermediv, hermepow Notes ----- @@ -538,7 +539,7 @@ def hermediv(c1, c2): See Also -------- - hermeadd, hermesub, hermemul, hermepow + hermeadd, hermesub, hermemulx, hermemul, hermepow Notes ----- @@ -605,7 +606,7 @@ def hermepow(c, pow, maxpower=16): See Also -------- - hermeadd, hermesub, hermemul, hermediv + hermeadd, hermesub, hermemulx, hermemul, hermediv Examples -------- @@ -1848,3 +1849,4 @@ class HermiteE(ABCPolyBase): nickname = 'herme' domain = np.array(hermedomain) window = np.array(hermedomain) + basis_name = 'He' diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 2b9757ab8..a116d20a7 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -16,11 +16,12 @@ Constants Arithmetic ---------- -- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. - `lagadd` -- add two Laguerre series. - `lagsub` -- subtract one Laguerre series from another. +- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. - `lagmul` -- multiply two Laguerre series. - `lagdiv` -- divide one Laguerre series by another. +- `lagpow` -- raise a Laguerre series to a positive integer power. - `lagval` -- evaluate a Laguerre series at given points. - `lagval2d` -- evaluate a 2D Laguerre series at given points. - `lagval3d` -- evaluate a 3D Laguerre series at given points. @@ -320,7 +321,7 @@ def lagadd(c1, c2): See Also -------- - lagsub, lagmul, lagdiv, lagpow + lagsub, lagmulx, lagmul, lagdiv, lagpow Notes ----- @@ -369,7 +370,7 @@ def lagsub(c1, c2): See Also -------- - lagadd, lagmul, lagdiv, lagpow + lagadd, lagmulx, lagmul, lagdiv, lagpow Notes ----- @@ -415,6 +416,10 @@ def lagmulx(c): out : ndarray Array representing the result of the multiplication. + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + Notes ----- The multiplication uses the recursion relationship for Laguerre @@ -468,7 +473,7 @@ def lagmul(c1, c2): See Also -------- - lagadd, lagsub, lagdiv, lagpow + lagadd, lagsub, lagmulx, lagdiv, lagpow Notes ----- @@ -536,7 +541,7 @@ def lagdiv(c1, c2): See Also -------- - lagadd, lagsub, lagmul, lagpow + lagadd, lagsub, lagmulx, lagmul, lagpow Notes ----- @@ -603,7 +608,7 @@ def lagpow(c, pow, maxpower=16): See Also -------- - lagadd, lagsub, lagmul, lagdiv + lagadd, lagsub, lagmulx, lagmul, lagdiv Examples -------- @@ -1801,3 +1806,4 @@ class Laguerre(ABCPolyBase): nickname = 'lag' domain = np.array(lagdomain) window = np.array(lagdomain) + basis_name = 'L' diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index a83c5735f..e9c24594b 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -27,12 +27,12 @@ Arithmetic .. 
autosummary:: :toctree: generated/ - legmulx multiply a Legendre series in P_i(x) by x. legadd add two Legendre series. legsub subtract one Legendre series from another. + legmulx multiply a Legendre series in ``P_i(x)`` by ``x``. legmul multiply two Legendre series. legdiv divide one Legendre series by another. - legpow raise a Legendre series to an positive integer power + legpow raise a Legendre series to a positive integer power. legval evaluate a Legendre series at given points. legval2d evaluate a 2D Legendre series at given points. legval3d evaluate a 3D Legendre series at given points. @@ -351,7 +351,7 @@ def legadd(c1, c2): See Also -------- - legsub, legmul, legdiv, legpow + legsub, legmulx, legmul, legdiv, legpow Notes ----- @@ -401,7 +401,7 @@ def legsub(c1, c2): See Also -------- - legadd, legmul, legdiv, legpow + legadd, legmulx, legmul, legdiv, legpow Notes ----- @@ -451,6 +451,10 @@ def legmulx(c): out : ndarray Array representing the result of the multiplication. + See Also + -------- + legadd, legmul, legmul, legdiv, legpow + Notes ----- The multiplication uses the recursion relationship for Legendre @@ -460,6 +464,12 @@ def legmulx(c): xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) + """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -500,7 +510,7 @@ def legmul(c1, c2): See Also -------- - legadd, legsub, legdiv, legpow + legadd, legsub, legmulx, legdiv, legpow Notes ----- @@ -570,7 +580,7 @@ def legdiv(c1, c2): See Also -------- - legadd, legsub, legmul, legpow + legadd, legsub, legmulx, legmul, legpow Notes ----- @@ -640,7 +650,7 @@ def legpow(c, pow, maxpower=16): See Also -------- - legadd, legsub, legmul, legdiv + legadd, legsub, legmulx, legmul, legdiv Examples -------- @@ -1831,3 +1841,4 @@ class Legendre(ABCPolyBase): nickname = 'leg' domain = np.array(legdomain) window = np.array(legdomain) + basis_name = 'P' diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index adbf30234..259cd31f5 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -18,9 +18,10 @@ Arithmetic ---------- - `polyadd` -- add two polynomials. - `polysub` -- subtract one polynomial from another. +- `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``. - `polymul` -- multiply two polynomials. - `polydiv` -- divide one polynomial by another. -- `polypow` -- raise a polynomial to an positive integer power +- `polypow` -- raise a polynomial to a positive integer power. - `polyval` -- evaluate a polynomial at given points. - `polyval2d` -- evaluate a 2D polynomial at given points. - `polyval3d` -- evaluate a 3D polynomial at given points. @@ -224,7 +225,7 @@ def polyadd(c1, c2): See Also -------- - polysub, polymul, polydiv, polypow + polysub, polymulx, polymul, polydiv, polypow Examples -------- @@ -269,7 +270,7 @@ def polysub(c1, c2): See Also -------- - polyadd, polymul, polydiv, polypow + polyadd, polymulx, polymul, polydiv, polypow Examples -------- @@ -312,6 +313,10 @@ def polymulx(c): out : ndarray Array representing the result of the multiplication. 
+ See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + Notes ----- @@ -351,7 +356,7 @@ def polymul(c1, c2): See Also -------- - polyadd, polysub, polydiv, polypow + polyadd, polysub, polymulx, polydiv, polypow Examples -------- @@ -388,7 +393,7 @@ def polydiv(c1, c2): See Also -------- - polyadd, polysub, polymul, polypow + polyadd, polysub, polymulx, polymul, polypow Examples -------- @@ -450,10 +455,13 @@ def polypow(c, pow, maxpower=None): See Also -------- - polyadd, polysub, polymul, polydiv + polyadd, polysub, polymulx, polymul, polydiv Examples -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1,2,3], 2) + array([ 1., 4., 10., 12., 9.]) """ # c is a trimmed copy @@ -1643,3 +1651,15 @@ class Polynomial(ABCPolyBase): nickname = 'poly' domain = np.array(polydomain) window = np.array(polydomain) + basis_name = None + + @staticmethod + def _repr_latex_term(i, arg_str, needs_parens): + if needs_parens: + arg_str = r'\left({}\right)'.format(arg_str) + if i == 0: + return '1' + elif i == 1: + return arg_str + else: + return '{}^{{{}}}'.format(arg_str, i) diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 439dfa08d..7fb7492c6 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval @@ -111,6 +113,15 @@ class TestArithmetic(object): res = cheb.chebadd(cheb.chebmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 738741668..15e24f92b 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -562,6 +562,56 @@ def test_ufunc_override(Poly): assert_raises(TypeError, np.add, x, p) + +class TestLatexRepr(object): + """Test the latex repr used by ipython """ + + def as_latex(self, obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. 
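For orientation, the `_repr_latex_` hook added to `_polybase.py` above is what IPython calls to render a series; with the default `\text{...}` scalar formatting the output looks roughly like this (illustrative session, exact strings depend on that formatting):

    import numpy as np
    from numpy.polynomial import Polynomial, Chebyshev

    p = Polynomial([1, 2, 3])
    print(p._repr_latex_())
    # roughly: $x \mapsto \text{1.0} + \text{2.0}\,x + \text{3.0}\,x^{2}$

    c = Chebyshev([1, 2, 3])
    print(c._repr_latex_())
    # roughly: $x \mapsto \text{1.0}\,{T}_{0}(x) + \text{2.0}\,{T}_{1}(x) + \text{3.0}\,{T}_{2}(x)$

This is why `as_latex` patches `_repr_latex_scalar` to plain `str` before comparing strings.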
Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + + # scaled input + p = Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + + def test_basis_func(self): + p = Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + + def test_multichar_basis_func(self): + p = HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + # # Test class method that only exists for some classes # diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 18c26af8f..1287ef3fe 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval @@ -99,6 +101,15 @@ class TestArithmetic(object): res = herm.hermadd(herm.hermmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 58d74dae9..ccb44ad73 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval @@ -99,6 +101,15 @@ class TestArithmetic(object): res = herme.hermeadd(herme.hermemul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 3cb630e46..3ababec5e 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ 
b/numpy/polynomial/tests/test_laguerre.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval @@ -96,6 +98,15 @@ class TestArithmetic(object): res = lag.lagadd(lag.lagmul(quo, ci), rem) assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index aeecd8775..a23086d59 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval @@ -100,6 +102,15 @@ class TestArithmetic(object): res = leg.legadd(leg.legmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c]*j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 67728e35e..0c93be278 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( @@ -102,6 +104,15 @@ class TestArithmetic(object): res = poly.polyadd(poly.polymul(quo, ci), rem) assert_equal(res, tgt, err_msg=msg) + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c]*j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index 82aefce5f..965ab5ea9 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -6,17 +6,15 @@ Random Number Generation ==================== ========================================================= Utility functions ============================================================================== -random Uniformly distributed values of a given shape. +random_sample Uniformly distributed floats over ``[0, 1)``. +random Alias for `random_sample`. bytes Uniformly distributed random bytes. random_integers Uniformly distributed integers in a given range. -random_sample Uniformly distributed floats in a given range. -random Alias for random_sample -ranf Alias for random_sample -sample Alias for random_sample -choice Generate a weighted random sample from a given array-like permutation Randomly permute a sequence / generate a random sequence. shuffle Randomly permute a sequence in place. seed Seed the random number generator. 
+choice Random sample from 1-D array. + ==================== ========================================================= ==================== ========================================================= @@ -90,9 +88,55 @@ from __future__ import division, absolute_import, print_function import warnings -# To get sub-modules -from .info import __doc__, __all__ - +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random_integers', + 'random_sample', + 'rayleigh', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf' +] with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="numpy.ndarray size changed") diff --git a/numpy/random/info.py b/numpy/random/info.py index be9c8d9bd..b9fd7f26a 100644 --- a/numpy/random/info.py +++ b/numpy/random/info.py @@ -1,139 +1,5 @@ -""" -======================== -Random Number Generation -======================== - -==================== ========================================================= -Utility functions -============================================================================== -random_sample Uniformly distributed floats over ``[0, 1)``. -random Alias for `random_sample`. -bytes Uniformly distributed random bytes. -random_integers Uniformly distributed integers in a given range. -permutation Randomly permute a sequence / generate a random sequence. -shuffle Randomly permute a sequence in place. -seed Seed the random number generator. -choice Random sample from 1-D array. - -==================== ========================================================= - -==================== ========================================================= -Compatibility functions -============================================================================== -rand Uniformly distributed values. -randn Normally distributed values. -ranf Uniformly distributed floating point numbers. -randint Uniformly distributed integers in a given range. -==================== ========================================================= - -==================== ========================================================= -Univariate distributions -============================================================================== -beta Beta distribution over ``[0, 1]``. -binomial Binomial distribution. -chisquare :math:`\\chi^2` distribution. -exponential Exponential distribution. -f F (Fisher-Snedecor) distribution. -gamma Gamma distribution. -geometric Geometric distribution. -gumbel Gumbel distribution. -hypergeometric Hypergeometric distribution. -laplace Laplace distribution. -logistic Logistic distribution. -lognormal Log-normal distribution. -logseries Logarithmic series distribution. -negative_binomial Negative binomial distribution. -noncentral_chisquare Non-central chi-square distribution. -noncentral_f Non-central F distribution. -normal Normal / Gaussian distribution. -pareto Pareto distribution. -poisson Poisson distribution. -power Power distribution. -rayleigh Rayleigh distribution. -triangular Triangular distribution. 
-uniform Uniform distribution. -vonmises Von Mises circular distribution. -wald Wald (inverse Gaussian) distribution. -weibull Weibull distribution. -zipf Zipf's distribution over ranked data. -==================== ========================================================= - -==================== ========================================================= -Multivariate distributions -============================================================================== -dirichlet Multivariate generalization of Beta distribution. -multinomial Multivariate generalization of the binomial distribution. -multivariate_normal Multivariate generalization of the normal distribution. -==================== ========================================================= - -==================== ========================================================= -Standard distributions -============================================================================== -standard_cauchy Standard Cauchy-Lorentz distribution. -standard_exponential Standard exponential distribution. -standard_gamma Standard Gamma distribution. -standard_normal Standard normal distribution. -standard_t Standard Student's t-distribution. -==================== ========================================================= - -==================== ========================================================= -Internal functions -============================================================================== -get_state Get tuple representing internal state of generator. -set_state Set state of generator. -==================== ========================================================= - -""" from __future__ import division, absolute_import, print_function -depends = ['core'] +from .. import __doc__ -__all__ = [ - 'beta', - 'binomial', - 'bytes', - 'chisquare', - 'choice', - 'dirichlet', - 'exponential', - 'f', - 'gamma', - 'geometric', - 'get_state', - 'gumbel', - 'hypergeometric', - 'laplace', - 'logistic', - 'lognormal', - 'logseries', - 'multinomial', - 'multivariate_normal', - 'negative_binomial', - 'noncentral_chisquare', - 'noncentral_f', - 'normal', - 'pareto', - 'permutation', - 'poisson', - 'power', - 'rand', - 'randint', - 'randn', - 'random_integers', - 'random_sample', - 'rayleigh', - 'seed', - 'set_state', - 'shuffle', - 'standard_cauchy', - 'standard_exponential', - 'standard_gamma', - 'standard_normal', - 'standard_t', - 'triangular', - 'uniform', - 'vonmises', - 'wald', - 'weibull', - 'zipf' -] +depends = ['core'] diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index ec759fdfb..5097ad88f 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -2505,7 +2505,7 @@ cdef class RandomState: Examples -------- From Dalgaard page 83 [1]_, suppose the daily energy intake for 11 - women in Kj is: + women in kilojoules (kJ) is: >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\ ... 7515, 8230, 8770]) @@ -4198,12 +4198,12 @@ cdef class RandomState: ----- The probability density for the Hypergeometric distribution is - .. math:: P(x) = \\frac{\\binom{m}{n}\\binom{N-m}{n-x}}{\\binom{N}{n}}, + .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}}, - where :math:`0 \\le x \\le m` and :math:`n+m-N \\le x \\le n` + where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g` - for P(x) the probability of x successes, n = ngood, m = nbad, and - N = number of samples. + for P(x) the probability of x successes, g = ngood, b = nbad, and + n = number of samples. 
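The corrected formula can be sanity-checked numerically; a quick sketch assuming small integer parameters (`comb` and `hypergeom_pmf` are ad-hoc helpers for illustration, not NumPy API):

    import numpy as np
    from math import factorial

    def comb(n, k):
        # binomial coefficient, adequate for small arguments
        return factorial(n) // (factorial(k) * factorial(n - k))

    def hypergeom_pmf(x, ngood, nbad, nsample):
        # P(x) in the corrected convention: g = ngood, b = nbad, n = nsample
        g, b, n = ngood, nbad, nsample
        return comb(g, x) * comb(b, n - x) / float(comb(g + b, n))

    g, b, n = 7, 5, 6
    draws = np.random.hypergeometric(g, b, n, size=200000)
    print(hypergeom_pmf(3, g, b, n))      # ~0.379
    print(np.mean(draws == 3))            # close to the value above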
Consider an urn with black and white marbles in it, ngood of them black and nbad are white. If you draw nsample balls without diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 2e0885024..8328c69c0 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -1454,7 +1454,6 @@ class TestBroadcast(object): assert_raises(ValueError, zipf, np.nan) assert_raises(ValueError, zipf, [0, 0, np.nan]) - def test_geometric(self): p = [0.5] bad_p_one = [-1] diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py index c2cf58377..1728d9d1f 100644 --- a/numpy/testing/_private/nosetester.py +++ b/numpy/testing/_private/nosetester.py @@ -338,12 +338,14 @@ class NoseTester(object): Identifies the tests to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow tests as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. + * attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional @@ -352,16 +354,14 @@ class NoseTester(object): If True, run doctests in module. Default is False. coverage : bool, optional If True, report coverage of NumPy code. Default is False. - (This requires the `coverage module: - <http://nedbatchelder.com/code/modules/coverage.html>`_). + (This requires the + `coverage module <https://nedbatchelder.com/code/modules/coveragehtml>`_). raise_warnings : None, str or sequence of warnings, optional This specifies which warnings to configure as 'raise' instead - of being shown once during the test execution. Valid strings are: - - - "develop" : equals ``(Warning,)`` - - "release" : equals ``()``, don't raise on any warnings. + of being shown once during the test execution. Valid strings are: - The default is to use the class initialization value. + * "develop" : equals ``(Warning,)`` + * "release" : equals ``()``, do not raise on any warnings. timer : bool or int, optional Timing of individual tests with ``nose-timer`` (which needs to be installed). If True, time tests and report on all of them. @@ -489,12 +489,14 @@ class NoseTester(object): Identifies the benchmarks to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow benchmarks as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. + * attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional Verbosity value for benchmark outputs, in the range 1-10. Default is 1. 
extra_argv : list, optional diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 0e2f8ba91..a3832fcde 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -687,6 +687,8 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, equal_inf=True): __tracebackhide__ = True # Hide traceback for py.test from numpy.core import array, isnan, inf, bool_ + from numpy.core.fromnumeric import all as npall + x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) @@ -697,14 +699,21 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, return x.dtype.char in "Mm" def func_assert_same_pos(x, y, func=isnan, hasval='nan'): - """Handling nan/inf: combine results of running func on x and y, - checking that they are True at the same locations.""" - # Both the != True comparison here and the cast to bool_ at - # the end are done to deal with `masked`, which cannot be - # compared usefully, and for which .all() yields masked. + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. + + """ + # Both the != True comparison here and the cast to bool_ at the end are + # done to deal with `masked`, which cannot be compared usefully, and + # for which np.all yields masked. The use of the function np.all is + # for back compatibility with ndarray subclasses that changed the + # return values of the all method. We are not committed to supporting + # such subclasses, but some used to work. x_id = func(x) y_id = func(y) - if (x_id == y_id).all() != True: + if npall(x_id == y_id) != True: msg = build_err_msg([x, y], err_msg + '\nx and y %s location mismatch:' % (hasval), verbose=verbose, header=header, diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py index ea684140d..d00820b80 100644 --- a/numpy/testing/tests/test_decorators.py +++ b/numpy/testing/tests/test_decorators.py @@ -53,7 +53,6 @@ class TestNoseDecorators(object): assert_(f_istest.__test__) assert_(not f_isnottest.__test__) - def test_skip_functions_hardcoded(self): @dec.skipif(True) def f1(x): diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 84d310992..2c60e2867 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1391,7 +1391,6 @@ class TestAssertNoGcCycles(object): assert_no_gc_cycles(no_cycle) - def test_asserts(self): def make_cycle(): a = [] @@ -1406,7 +1405,6 @@ class TestAssertNoGcCycles(object): with assert_raises(AssertionError): assert_no_gc_cycles(make_cycle) - def test_fails(self): """ Test that in cases where the garbage cannot be collected, we raise an diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 75ce9c8ca..675f8d242 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -12,11 +12,11 @@ try: cdll = None if hasattr(sys, 'gettotalrefcount'): try: - cdll = load_library('multiarray_d', np.core.multiarray.__file__) + cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__) except OSError: pass if cdll is None: - cdll = load_library('multiarray', np.core.multiarray.__file__) + cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__) _HAS_CTYPE = True except ImportError: _HAS_CTYPE = False @@ -30,7 +30,7 @@ class TestLoadLibrary(object): def test_basic(self): try: # Should succeed - load_library('multiarray', np.core.multiarray.__file__) + 
load_library('_multiarray_umath', np.core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes is not available on this python: skipping the test" " (import error was: %s)" % str(e)) @@ -43,7 +43,7 @@ class TestLoadLibrary(object): try: so = get_shared_lib_extension(is_python_ext=True) # Should succeed - load_library('multiarray%s' % so, np.core.multiarray.__file__) + load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__) except ImportError: print("No distutils available, skipping test.") except ImportError as e: @@ -170,3 +170,23 @@ class TestAsArray(object): check(as_array(pointer(c_array), shape=())) check(as_array(pointer(c_array[0]), shape=(2,))) check(as_array(pointer(c_array[0][0]), shape=(2, 3))) + + def test_reference_cycles(self): + # related to gh-6511 + import ctypes + + # create array to work with + # don't use int/long to avoid running into bpo-10746 + N = 100 + a = np.arange(N, dtype=np.short) + + # get pointer to array + pnt = np.ctypeslib.as_ctypes(a) + + with np.testing.assert_no_gc_cycles(): + # decay the array above to a pointer to its first element + newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short)) + # and construct an array using this data + b = np.ctypeslib.as_array(newpnt, (N,)) + # now delete both, which should cleanup both objects + del newpnt, b diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index ee09390c7..26e3ea745 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -62,32 +62,37 @@ def run_command(cmd, check_code=True): @pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") def test_f2py(): # test that we can run f2py script + + def try_f2py_commands(cmds): + success = 0 + for f2py_cmd in cmds: + try: + code, stdout, stderr = run_command([f2py_cmd, '-v']) + assert_equal(stdout.strip(), b'2') + success += 1 + except Exception: + pass + return success + if sys.platform == 'win32': + # Only the single 'f2py' script is installed in windows. exe_dir = dirname(sys.executable) - if exe_dir.endswith('Scripts'): # virtualenv - f2py_cmd = r"%s\f2py.py" % exe_dir + f2py_cmds = [os.path.join(exe_dir, 'f2py')] else: - f2py_cmd = r"%s\Scripts\f2py.py" % exe_dir - - code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v']) - success = stdout.strip() == b'2' - assert_(success, "Warning: f2py not found in path") + f2py_cmds = [os.path.join(exe_dir, "Scripts", 'f2py')] + success = try_f2py_commands(f2py_cmds) + msg = "Warning: f2py not found in path" + assert_(success == 1, msg) else: + # Three scripts are installed in Unix-like systems: + # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example, + # if installed with python3.7 the scripts would be named + # 'f2py', 'f2py3', and 'f2py3.7'. version = sys.version_info major = str(version.major) minor = str(version.minor) - f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor) - success = False - - for f2py_cmd in f2py_cmds: - try: - code, stdout, stderr = run_command([f2py_cmd, '-v']) - assert_equal(stdout.strip(), b'2') - success = True - break - except Exception: - pass - msg = "Warning: neither %s nor %s nor %s found in path" % f2py_cmds - assert_(success, msg) + success = try_f2py_commands(f2py_cmds) + msg = "Warning: not all of %s, %s, and %s are found in path" % f2py_cmds + assert_(success == 3, msg) |
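As the comments in the reworked test_f2py spell out, the set of console scripts the test expects differs by platform; a compact sketch of just the name construction (`expected_f2py_names` is an illustrative helper, not part of the test module):

    import sys

    def expected_f2py_names():
        # Windows installs a single 'f2py' script; Unix-like systems get
        # 'f2py', 'f2py{major}' and 'f2py{major}.{minor}',
        # e.g. f2py, f2py3, f2py3.7 for a Python 3.7 install
        if sys.platform == 'win32':
            return ['f2py']
        v = sys.version_info
        return ['f2py', 'f2py%d' % v.major, 'f2py%d.%d' % (v.major, v.minor)]

    print(expected_f2py_names())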