49 files changed, 790 insertions, 157 deletions
diff --git a/.codecov.yml b/.codecov.yml index d92d54c9d..8c19f9e8e 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -8,6 +8,6 @@ coverage: default: # Require 1% coverage, i.e., always succeed target: 1 - patch: false + patch: true changes: false comment: off diff --git a/.travis.yml b/.travis.yml index aa01457fb..85f6127cd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,16 +27,6 @@ stages: # Do the rest of the tests - name: Comprehensive tests -env: - global: - - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy - # The following is generated with the command: - # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY - - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9Tr\ - XrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPow\ - iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\ - ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" - jobs: include: # Do all python versions without environment variables set @@ -134,6 +124,3 @@ before_install: script: - ./tools/travis-test.sh - -after_success: - - ./tools/travis-upload-wheel.sh diff --git a/doc/Makefile b/doc/Makefile index 4bc2bbf5a..688e52d07 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -103,11 +103,11 @@ dist: build/dist.tar.gz build/dist.tar.gz: make $(DIST_VARS) real-dist -real-dist: dist-build html-build html-scipyorg +real-dist: dist-build html-build test -d build/latex || make latex-build make -C build/latex all-pdf -rm -rf build/dist - cp -r build/html-scipyorg build/dist + cp -r build/html build/dist cd build/html && zip -9r ../dist/numpy-html.zip . cp build/latex/numpy-ref.pdf build/dist cp build/latex/numpy-user.pdf build/dist diff --git a/doc/neps/index.rst.tmpl b/doc/neps/index.rst.tmpl index 4c5b7766f..0299f8671 100644 --- a/doc/neps/index.rst.tmpl +++ b/doc/neps/index.rst.tmpl @@ -29,6 +29,9 @@ Meta-NEPs (NEPs about NEPs or Processes) nep-template + +{% if has_provisional %} + Provisional NEPs (provisionally accepted; interface may change) --------------------------------------------------------------- @@ -39,6 +42,8 @@ Provisional NEPs (provisionally accepted; interface may change) {{ tags['Title'] }} <{{ tags['Filename'] }}> {% endfor %} +{% endif %} + Accepted NEPs (implementation in progress) ------------------------------------------ diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst index fb9b838b5..3147d8cee 100644 --- a/doc/neps/nep-0018-array-function-protocol.rst +++ b/doc/neps/nep-0018-array-function-protocol.rst @@ -7,7 +7,7 @@ NEP 18 — A dispatch mechanism for NumPy's high level array functions :Author: Marten van Kerkwijk <mhvk@astro.utoronto.ca> :Author: Hameer Abbasi <hameerabbasi@yahoo.com> :Author: Eric Wieser <wieser.eric@gmail.com> -:Status: Provisional +:Status: Final :Type: Standards Track :Created: 2018-05-29 :Updated: 2019-05-25 diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst index bdc0baa39..dbead1b9b 100644 --- a/doc/neps/nep-0029-deprecation_policy.rst +++ b/doc/neps/nep-0029-deprecation_policy.rst @@ -1,3 +1,5 @@ +.. 
_NEP29: + ================================================================================== NEP 29 — Recommend Python and Numpy version support as a community policy standard ================================================================================== diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py index 7d159fdb3..51227a6f1 100644 --- a/doc/neps/tools/build_index.py +++ b/doc/neps/tools/build_index.py @@ -22,6 +22,7 @@ def nep_metadata(): meta_re = r':([a-zA-Z\-]*): (.*)' + has_provisional = False neps = {} print('Loading metadata for:') for source in sources: @@ -58,6 +59,8 @@ def nep_metadata(): f'NEP {nr} is Accepted/Rejected/Withdrawn but ' 'has no Resolution tag' ) + if tags['Status'] == 'Provisional': + has_provisional = True neps[nr] = tags @@ -95,7 +98,7 @@ def nep_metadata(): f'been set to Superseded' ) - return {'neps': neps} + return {'neps': neps, 'has_provisional': has_provisional} infile = 'index.rst.tmpl' diff --git a/doc/release/upcoming_changes/15900.deprecation.rst b/doc/release/upcoming_changes/15900.deprecation.rst new file mode 100644 index 000000000..22be711d0 --- /dev/null +++ b/doc/release/upcoming_changes/15900.deprecation.rst @@ -0,0 +1,16 @@ +Indexing errors will be reported even when index result is empty +---------------------------------------------------------------- +In the future, NumPy will raise an IndexError when an +integer array index contains out-of-bounds values even if a non-indexed +dimension is of length 0. This will now emit a DeprecationWarning. +This can happen when the array was empty to begin with, or when an empty +slice is involved:: + + arr1 = np.zeros((5, 0)) + arr1[[20]] + arr2 = np.zeros((5, 5)) + arr2[[20], :0] + +Previously, the non-empty index ``[20]`` was not checked for correctness. +It will now be checked, causing a deprecation warning which will later be +turned into an error. This also applies to assignments. diff --git a/doc/source/conf.py b/doc/source/conf.py index a58dc4897..d6a0f8bf3 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -155,6 +155,9 @@ plot_html_show_source_link = False # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' +# XeLaTeX for better support of unicode characters +latex_engine = 'xelatex' + # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). _stdauthor = 'Written by the NumPy community' diff --git a/doc/source/docs/howto_document.rst b/doc/source/docs/howto_document.rst index 2a97a100d..cf86b7e99 100644 --- a/doc/source/docs/howto_document.rst +++ b/doc/source/docs/howto_document.rst @@ -4,6 +4,13 @@ A Guide to NumPy/SciPy Documentation ==================================== +User documentation +******************* +NumPy text documents should follow the `Google developer documentation style guide <https://developers.google.com/style>`_. + +Docstrings +********** + When using `Sphinx <http://www.sphinx-doc.org/>`__ in combination with the numpy conventions, you should use the ``numpydoc`` extension so that your docstrings will be handled correctly. For example, Sphinx will extract the diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst new file mode 100644 index 000000000..4e6a29d9f --- /dev/null +++ b/doc/source/user/index.rst @@ -0,0 +1,26 @@ +:orphan: + .. 
_user: + +################ +NumPy User Guide +################ + +This guide is intended as an introductory overview of NumPy and +explains how to install and make use of the most important features of +NumPy. For detailed reference documentation of the functions and +classes contained in the package, see the :ref:`reference`. + +.. toctree:: + :maxdepth: 1 + + setting-up + quickstart + absolute_beginners + basics + misc + numpy-for-matlab-users + building + c-info + tutorials_index + howtos_index diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py index 708241289..6d7cbb244 100644 --- a/numpy/core/_dtype_ctypes.py +++ b/numpy/core/_dtype_ctypes.py @@ -22,9 +22,10 @@ Unfortunately, this fails because: * PEP3118 cannot represent unions, but both numpy and ctypes can * ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) """ -import _ctypes -import ctypes +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/core/__init__.py. import numpy as np @@ -39,6 +40,7 @@ def _from_ctypes_structure(t): "ctypes bitfields have no dtype equivalent") if hasattr(t, "_pack_"): + import ctypes formats = [] offsets = [] names = [] @@ -79,6 +81,7 @@ def _from_ctypes_scalar(t): def _from_ctypes_union(t): + import ctypes formats = [] offsets = [] names = [] @@ -98,6 +101,7 @@ def dtype_from_ctypes_type(t): """ Construct a dtype object from a ctypes type """ + import _ctypes if issubclass(t, _ctypes.Array): return _from_ctypes_array(t) elif issubclass(t, _ctypes._Pointer): diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index 528113a9e..1868610f4 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -52,3 +52,6 @@ # Version 13 (NumPy 1.19) No change. # Version 13 (NumPy 1.20) No change. 
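For illustration (a reviewer's note, not part of the patch): each entry in cversions.txt pairs a C API version with an MD5 checksum derived from the exported API signatures, so additions like the version-14 DType entry below get a fresh hash. A minimal, hypothetical sketch of such an order-sensitive checksum (NumPy's real generator lives in the code_generators scripts):

    import hashlib

    def api_checksum(signatures):
        # digest over the ordered list of exported API signatures;
        # adding or reordering an entry changes the recorded hash
        m = hashlib.md5()
        for sig in signatures:
            m.update(sig.encode('ascii'))
        return m.hexdigest()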
0x0000000d = 5b0e8bbded00b166125974fc71e80a33 + +# Version 14 (NumPy 1.19) DType related API additions +0x0000000e = 17a0f366e55ec05e5c5c149123478452 diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 88dc2d90a..d88772bdc 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -37,6 +37,7 @@ API_FILES = [join('multiarray', 'alloc.c'), join('multiarray', 'datetime_busdaycal.c'), join('multiarray', 'datetime_strings.c'), join('multiarray', 'descriptor.c'), + join('multiarray', 'dtypemeta.c'), join('multiarray', 'einsum.c.src'), join('multiarray', 'flagsobject.c'), join('multiarray', 'getset.c'), @@ -309,11 +310,13 @@ def write_file(filename, data): # Those *Api classes instances know how to output strings for the generated code class TypeApi: - def __init__(self, name, index, ptr_cast, api_name): + def __init__(self, name, index, ptr_cast, api_name, internal_type=None): self.index = index self.name = name self.ptr_cast = ptr_cast self.api_name = api_name + # The type used internally; if None, it is the same as the exported type (ptr_cast) + self.internal_type = internal_type def define_from_array_api_string(self): return "#define %s (*(%s *)%s[%d])" % (self.name, @@ -325,9 +328,19 @@ class TypeApi: return " (void *) &%s" % self.name def internal_define(self): - astr = """\ -extern NPY_NO_EXPORT PyTypeObject %(type)s; -""" % {'type': self.name} + if self.internal_type is None: + return f"extern NPY_NO_EXPORT {self.ptr_cast} {self.name};\n" + + # If we are here, we need to define a larger struct internally, to which + # the type can safely be cast. But we normally want to use the original + # type, so we mangle the name: + mangled_name = f"{self.name}Full" + astr = ( + # Create the mangled name: + f"extern NPY_NO_EXPORT {self.internal_type} {mangled_name};\n" + # And define the name as: (*(type *)(&mangled_name)) + f"#define {self.name} (*({self.ptr_cast} *)(&{mangled_name}))\n" + ) return astr class GlobalVarApi: diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index fe21bc543..7997135bb 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -201,7 +201,9 @@ def do_generate_api(targets, sources): for name, val in types_api.items(): index = val[0] - multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) + internal_type = None if len(val) == 1 else val[1] + multiarray_api_dict[name] = TypeApi( + name, index, 'PyTypeObject', api_name, internal_type) if len(multiarray_api_dict) != len(multiarray_api_index): keys_dict = set(multiarray_api_dict.keys()) diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py index 916fb537e..fbd323368 100644 --- a/numpy/core/code_generators/numpy_api.py +++ b/numpy/core/code_generators/numpy_api.py @@ -30,7 +30,9 @@ multiarray_scalar_bool_values = { multiarray_types_api = { 'PyBigArray_Type': (1,), 'PyArray_Type': (2,), - 'PyArrayDescr_Type': (3,), + # Internally, PyArrayDescr_Type is a PyArray_DTypeMeta; + # the following also defines PyArrayDescr_TypeFull ("Full" appended) + 'PyArrayDescr_Type': (3, "PyArray_DTypeMeta"), 'PyArrayFlags_Type': (4,), 'PyArrayIter_Type': (5,), 'PyArrayMultiIter_Type': (6,), diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 7193af839..0c63bcf73 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1374,10 +1374,17 @@ def resize(a, new_shape): See Also 
-------- + np.reshape : Reshape an array without changing the total size. + np.pad : Enlarge and pad an array. + np.repeat : Repeat elements of an array. ndarray.resize : resize an array in-place. Notes ----- + When the total size of the array does not change, `~numpy.reshape` should + be used. In most other cases, either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + Warning: This functionality does **not** consider axes separately, i.e. it does not apply interpolation/extrapolation. It fills the return array with the required number of elements, taken @@ -1401,22 +1408,21 @@ """ if isinstance(new_shape, (int, nt.integer)): new_shape = (new_shape,) + a = ravel(a) - Na = len(a) - total_size = um.multiply.reduce(new_shape) - if Na == 0 or total_size == 0: - return mu.zeros(new_shape, a.dtype) - n_copies = int(total_size / Na) - extra = total_size % Na + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError('all elements of `new_shape` must be non-negative') - if extra != 0: - n_copies = n_copies + 1 - extra = Na - extra + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return np.zeros_like(a, shape=new_shape) - a = concatenate((a,) * n_copies) - if extra > 0: - a = a[:-extra] + repeats = -(-new_size // a.size) # ceil division + a = concatenate((a,) * repeats)[:new_size] return reshape(a, new_shape) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 5b7e8952e..1b61899fa 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1809,6 +1809,77 @@ typedef struct { typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, void *user_data); + +/* + * PyArray_DTypeMeta related definitions. + * + * As of now, this API is preliminary and will be extended as necessary. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * The Structures defined in this block are considered private API and + * may change without warning! + */ + /* TODO: Make this definition public in the API, as soon as it's settled */ + NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type; + + /* + * While NumPy DTypes would not need to be heap types, the plan is to + * make DTypes available in Python, at which point we will probably want + * them to be. + * Since we also wish to add fields to the DType class, this looks like + * a typical instance definition, but with PyHeapTypeObject instead of + * only the PyObject_HEAD. + * This must only be exposed with very careful consideration, since + * it is a fairly complex construct which may be better left open to + * refactoring. + */ + typedef struct _PyArray_DTypeMeta { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance; for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* + * Is this DType created using the old API? This exists mainly to + * allow for assertions in paths specific to wrapping legacy types. + */ + npy_bool legacy; + /* The values stored by a parametric datatype depend on its instance */ + npy_bool parametric; + /* whether the DType can be instantiated (i.e. np.dtype cannot) */ + npy_bool abstract; + + /* + * The following fields replicate the most important dtype information. 
+ * In the legacy implementation most of these are stored in the + * PyArray_Descr struct. + */ + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* + * Point to the original ArrFuncs. + * NOTE: We could make a copy to detect changes to `f`. + */ + PyArray_ArrFuncs *f; + } PyArray_DTypeMeta; + + #define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr)) + +#endif /* NPY_INTERNAL_BUILD */ + + /* * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files * npy_*_*_deprecated_api.h are only included from here and nowhere else. diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h index 440458010..a4f90e019 100644 --- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h +++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -13,11 +13,10 @@ #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") -#elif defined(__GNUC__) +#else #warning "Using deprecated NumPy API, disable it with " \ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" #endif -/* TODO: How to do this warning message for other compilers? */ #endif /* diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index efe196c84..798da6957 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -60,6 +60,14 @@ static NPY_INLINE int PyInt_Check(PyObject *op) { PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) #endif +#if PY_VERSION_HEX < 0x030900a4 + /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */ + #define Py_SET_TYPE(obj, typ) (Py_TYPE(obj) = typ) + /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */ + #define Py_SET_SIZE(obj, size) (Py_SIZE(obj) = size) +#endif + + #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) /* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */ @@ -546,4 +554,5 @@ NpyCapsule_Check(PyObject *ptr) } #endif + #endif /* _NPY_3KCOMPAT_H_ */ diff --git a/numpy/core/records.py b/numpy/core/records.py index af59de425..7e1c0d591 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -772,7 +772,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): - """Create a record array from binary data + r"""Create a record array from binary data Note that despite the name of this function it does not accept `str` instances. 
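For illustration (a reviewer's note, not part of the patch): the usage the note above describes takes binary data, not ``str``. A minimal sketch with an illustrative field layout:

    import numpy as np

    # one record of two little-endian int16 fields, packed as raw bytes
    buf = b'\x01\x00\x02\x00'
    rec = np.rec.fromstring(buf, dtype=[('a', '<i2'), ('b', '<i2')], shape=1)
    print(rec.a[0], rec.b[0])   # -> 1 2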
diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 9de9fc632..16bac4272 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -784,6 +784,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'conversion_utils.h'), join('src', 'multiarray', 'ctors.h'), join('src', 'multiarray', 'descriptor.h'), + join('src', 'multiarray', 'dtypemeta.h'), join('src', 'multiarray', 'dragon4.h'), join('src', 'multiarray', 'getset.h'), join('src', 'multiarray', 'hashdescr.h'), @@ -842,6 +843,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'datetime_busday.c'), join('src', 'multiarray', 'datetime_busdaycal.c'), join('src', 'multiarray', 'descriptor.c'), + join('src', 'multiarray', 'dtypemeta.c'), join('src', 'multiarray', 'dragon4.c'), join('src', 'multiarray', 'dtype_transfer.c'), join('src', 'multiarray', 'einsum.c.src'), diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 63c4a76a9..72b59f9ae 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -40,7 +40,8 @@ C_ABI_VERSION = 0x01000009 # 0x0000000c - 1.14.x # 0x0000000c - 1.15.x # 0x0000000d - 1.16.x -C_API_VERSION = 0x0000000d +# 0x0000000e - 1.19.x +C_API_VERSION = 0x0000000e class MismatchCAPIWarning(Warning): pass diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src index 4f193a471..bd4743905 100644 --- a/numpy/core/src/common/npy_cpu_features.c.src +++ b/numpy/core/src/common/npy_cpu_features.c.src @@ -76,11 +76,13 @@ npy__cpu_getxcr0(void) #if defined(_MSC_VER) || defined (__INTEL_COMPILER) return _xgetbv(0); #elif defined(__GNUC__) || defined(__clang__) + /* named form of xgetbv not supported on OSX, so must use byte form, see: + * https://github.com/asmjit/asmjit/issues/78 + */ unsigned int eax, edx; - __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0)); - return (eax | (unsigned long long)edx << 32); + __asm(".byte 0x0F, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(0)); + return eax; #else - // TODO: handle other x86 compilers return 0; #endif } @@ -110,7 +112,6 @@ npy__cpu_cpuid(int reg[4], int func_id) ); #endif #else - // TODO: handle other x86 compilers reg[0] = 0; #endif } @@ -123,8 +124,15 @@ npy__cpu_init_features(void) // validate platform support int reg[] = {0, 0, 0, 0}; npy__cpu_cpuid(reg, 0); - if (reg[0] == 0) - return; + if (reg[0] == 0) { + npy__cpu_have[NPY_CPU_FEATURE_MMX] = 1; + npy__cpu_have[NPY_CPU_FEATURE_SSE] = 1; + npy__cpu_have[NPY_CPU_FEATURE_SSE2] = 1; + #ifdef NPY_CPU_AMD64 + npy__cpu_have[NPY_CPU_FEATURE_SSE3] = 1; + #endif + return; + } npy__cpu_cpuid(reg, 1); npy__cpu_have[NPY_CPU_FEATURE_MMX] = (reg[3] & (1 << 23)) != 0; @@ -341,15 +349,17 @@ npy__cpu_init_features_linux(void) if ((hwcap2 & NPY__HWCAP2_AES) || (hwcap2 & NPY__HWCAP2_SHA1) || (hwcap2 & NPY__HWCAP2_SHA2) || (hwcap2 & NPY__HWCAP2_PMULL) || (hwcap2 & NPY__HWCAP2_CRC32)) + { + hwcap = hwcap2; #else if (1) -#endif { if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) { // Is this could happen? 
maybe disabled by kernel // BTW this will break the baseline of AARCH64 return 1; } +#endif npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 38d5f21eb..552c56349 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -20,6 +20,7 @@ #include "npy_sort.h" #include "common.h" #include "ctors.h" +#include "dtypemeta.h" #include "lowlevel_strided_loops.h" #include "usertypes.h" #include "_datetime.h" @@ -4367,6 +4368,17 @@ set_typeinfo(PyObject *dict) PyObject *cobj, *key; /* + * Override the base class for all types, eventually all of this logic + * should be defined on the class and inherited to the scalar. + * (NPY_HALF is the largest builtin one.) + */ + for (i = 0; i <= NPY_HALF; i++) { + if (dtypemeta_wrap_legacy_descriptor(_builtin_descrs[i]) < 0) { + return -1; + } + } + + /* * Add cast functions for the new types */ diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 9e8646b25..14e64b647 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -869,7 +869,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, return 0; } -static PyObject * +static void raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) { static PyObject *exc_type = NULL; @@ -894,12 +894,12 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) } PyErr_SetObject(exc_type, exc_value); Py_DECREF(exc_value); - return NULL; + return; fail: /* we couldn't raise the formatted exception for some reason */ PyErr_WriteUnraisable(NULL); - return PyErr_NoMemory(); + PyErr_NoMemory(); } /* @@ -1079,10 +1079,10 @@ PyArray_NewFromDescr_int( data = npy_alloc_cache(nbytes); } if (data == NULL) { - return raise_memory_error(fa->nd, fa->dimensions, descr); + raise_memory_error(fa->nd, fa->dimensions, descr); + goto fail; } fa->flags |= NPY_ARRAY_OWNDATA; - } else { /* diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index b26a26abf..b4107f8f3 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -1744,7 +1744,7 @@ fail: NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base) { - PyArray_Descr *newdescr = PyObject_New(PyArray_Descr, &PyArrayDescr_Type); + PyArray_Descr *newdescr = PyObject_New(PyArray_Descr, Py_TYPE(base)); if (newdescr == NULL) { return NULL; @@ -2261,9 +2261,16 @@ static PyGetSetDef arraydescr_getsets[] = { }; static PyObject * -arraydescr_new(PyTypeObject *NPY_UNUSED(subtype), +arraydescr_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) { + if (subtype != &PyArrayDescr_Type) { + /* The DTypeMeta class should prevent this from happening. 
*/ + PyErr_Format(PyExc_SystemError, + "'%S' must not inherit np.dtype.__new__().", subtype); + return NULL; + } + PyObject *odescr, *metadata=NULL; PyArray_Descr *descr, *conv; npy_bool align = NPY_FALSE; @@ -2334,6 +2341,7 @@ arraydescr_new(PyTypeObject *NPY_UNUSED(subtype), return (PyObject *)conv; } + /* * Return a tuple of * (cleaned metadata dictionary, tuple with (str, num)) @@ -3456,21 +3464,34 @@ static PyMappingMethods descr_as_mapping = { /****************** End of Mapping Protocol ******************************/ -NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - .tp_name = "numpy.dtype", - .tp_basicsize = sizeof(PyArray_Descr), - /* methods */ - .tp_dealloc = (destructor)arraydescr_dealloc, - .tp_repr = (reprfunc)arraydescr_repr, - .tp_as_number = &descr_as_number, - .tp_as_sequence = &descr_as_sequence, - .tp_as_mapping = &descr_as_mapping, - .tp_str = (reprfunc)arraydescr_str, - .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_richcompare = (richcmpfunc)arraydescr_richcompare, - .tp_methods = arraydescr_methods, - .tp_members = arraydescr_members, - .tp_getset = arraydescr_getsets, - .tp_new = arraydescr_new, + +/* + * NOTE: Since this is a MetaClass, the name has Full appended here, the + * correct name of the type is PyArrayDescr_Type. + */ +NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull = { + {{ + /* NULL represents `type`, this is set to DTypeMeta at import time */ + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "numpy.dtype", + .tp_basicsize = sizeof(PyArray_Descr), + .tp_dealloc = (destructor)arraydescr_dealloc, + .tp_repr = (reprfunc)arraydescr_repr, + .tp_as_number = &descr_as_number, + .tp_as_sequence = &descr_as_sequence, + .tp_as_mapping = &descr_as_mapping, + .tp_str = (reprfunc)arraydescr_str, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_richcompare = (richcmpfunc)arraydescr_richcompare, + .tp_methods = arraydescr_methods, + .tp_members = arraydescr_members, + .tp_getset = arraydescr_getsets, + .tp_new = arraydescr_new, + },}, + .type_num = -1, + .kind = '\0', + .abstract = 1, + .parametric = 0, + .singleton = 0, + .scalar_type = NULL, }; diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c new file mode 100644 index 000000000..9982cd676 --- /dev/null +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -0,0 +1,268 @@ +/* Array Descr Object */ + +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include "structmember.h" +#include "assert.h" + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#include <numpy/ndarraytypes.h> +#include "npy_pycompat.h" + +#include "dtypemeta.h" + + +static void +dtypemeta_dealloc(PyArray_DTypeMeta *self) { + /* Do not accidentally delete a statically defined DType: */ + assert(((PyTypeObject *)self)->tp_flags & Py_TPFLAGS_HEAPTYPE); + + Py_XDECREF(self->scalar_type); + Py_XDECREF(self->singleton); + PyType_Type.tp_dealloc((PyObject *) self); +} + +static PyObject * +dtypemeta_new(PyTypeObject *NPY_UNUSED(type), + PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +{ + PyErr_SetString(PyExc_TypeError, + "Preliminary-API: Cannot subclass DType."); + return NULL; +} + +static int +dtypemeta_init(PyTypeObject *NPY_UNUSED(type), + PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +{ + PyErr_SetString(PyExc_TypeError, + "Preliminary-API: Cannot __init__ DType class."); + return -1; +} + +/** + * tp_is_gc slot of Python types. This is implemented only for documentation + * purposes to indicate and document the subtleties involved. 
+ * + * Python Type objects are either statically created (typical C-Extension type) + * or HeapTypes (typically created in Python). + * HeapTypes have the Py_TPFLAGS_HEAPTYPE flag and are garbage collected. + * Our DTypeMeta instances (`np.dtype` and its subclasses) *may* be HeapTypes + * if the Py_TPFLAGS_HEAPTYPE flag is set (they are created from Python). + * They are not for legacy DTypes or np.dtype itself. + * + * @param self + * @return nonzero if the object is garbage collected + */ +static NPY_INLINE int +dtypemeta_is_gc(PyObject *dtype_class) +{ + return PyType_Type.tp_is_gc(dtype_class); +} + + +static int +dtypemeta_traverse(PyArray_DTypeMeta *type, visitproc visit, void *arg) +{ + /* + * We have to traverse the base class (if it is a HeapType). + * PyType_Type will handle this logic for us. + * This function is currently not used, but will probably be necessary + * in the future when we implement HeapTypes (python/dynamically + * defined types). It should be revised at that time. + */ + assert(0); + assert(!type->legacy && (PyTypeObject *)type != &PyArrayDescr_Type); + Py_VISIT(type->singleton); + Py_VISIT(type->scalar_type); + return PyType_Type.tp_traverse((PyObject *)type, visit, arg); +} + + +static PyObject * +legacy_dtype_default_new(PyArray_DTypeMeta *self, + PyObject *args, PyObject *kwargs) +{ + /* TODO: This should allow endianness and possibly metadata */ + if (self->parametric) { + /* reject parametric ones since we would need to get unit, etc. info */ + PyErr_Format(PyExc_TypeError, + "Preliminary-API: Flexible/Parametric legacy DType '%S' can " + "only be instantiated using `np.dtype(...)`", self); + return NULL; + } + + if (PyTuple_GET_SIZE(args) != 0 || + (kwargs != NULL && PyDict_Size(kwargs))) { + PyErr_Format(PyExc_TypeError, + "currently only the no-argument instantiation is supported; " + "use `np.dtype` instead."); + return NULL; + } + Py_INCREF(self->singleton); + return (PyObject *)self->singleton; +} + +/** + * This function takes a PyArray_Descr and replaces its base class with + * a newly created dtype subclass (DTypeMeta instances). + * There are some subtleties that need to be remembered when doing this; + * first, the class object itself could be either a HeapType or not. + * Since we are defining the DType from C, we will not make it a HeapType, + * thus making it identical to a typical *static* type (except that we + * malloc it). We could do it the other way, but there seems to be no + * reason to do so. + * + * The DType instances (the actual dtypes or descriptors) are based on + * prototypes which are passed in. These should not be garbage collected + * and thus Py_TPFLAGS_HAVE_GC is not set. (We could allow this, but then + * we would have to allocate a new object, since the GC needs information + * before the actual struct). + * + * The above is the reason why this should work exactly like it would for a + * static type here. + * Otherwise, we blur the lines between C-defined extension classes + * and Python subclasses. e.g. `class MyInt(int): pass` is very different + * from our `class Float64(np.dtype): pass`, because the latter should not + * be a HeapType and its instances should be exact PyArray_Descr structs. + * + * @param descr The descriptor that should be wrapped. + * + * @returns 0 on success, -1 on failure. 
+ */ +NPY_NO_EXPORT int +dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr) +{ + if (Py_TYPE(descr) != &PyArrayDescr_Type) { + PyErr_Format(PyExc_RuntimeError, + "During creation/wrapping of legacy DType, the original class " + "was not PyArrayDescr_Type (it is replaced in this step)."); + return -1; + } + + /* + * Note: we have no intention of freeing the memory again since this + * behaves identically to static type definition (see comment above). + * This seems cleaner for the legacy API; in the new API both static + * and heap types are possible (some difficulty arises from the fact that + * these are instances of DTypeMeta and not type). + * In particular our own DTypes can be true static declarations. + * However, this function remains necessary for legacy user dtypes. + */ + + const char *scalar_name = descr->typeobj->tp_name; + /* + * We have to take only the name, and ignore the module, to get + * a reasonable __name__, since static types are limited in this regard + * (this is not ideal, but not a big issue in practice). + * This is what Python does to print __name__ for static types. + */ + const char *dot = strrchr(scalar_name, '.'); + if (dot) { + scalar_name = dot + 1; + } + ssize_t name_length = strlen(scalar_name) + 14; + + char *tp_name = malloc(name_length); + if (tp_name == NULL) { + PyErr_NoMemory(); + return -1; + } + + snprintf(tp_name, name_length, "numpy.dtype[%s]", scalar_name); + + PyArray_DTypeMeta *dtype_class = malloc(sizeof(PyArray_DTypeMeta)); + if (dtype_class == NULL) { + PyDataMem_FREE(tp_name); + return -1; + } + /* + * Initialize the struct fields identically to static code by copying + * a prototype instance for everything except our own fields, which + * vary between the DTypes. + * In particular any Object initialization must be strictly copied from + * the untouched prototype to avoid complexities (e.g. with PyPy). + * Any Type slots need to be fixed before PyType_Ready, although most + * will be inherited automatically there. + */ + static PyArray_DTypeMeta prototype = { + {{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = NULL, /* set below */ + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_base = &PyArrayDescr_Type, + .tp_new = (newfunc)legacy_dtype_default_new, + },}, + .legacy = 1, + .abstract = 0, /* this is a concrete DType */ + /* Further fields are not common between DTypes */ + }; + memcpy(dtype_class, &prototype, sizeof(PyArray_DTypeMeta)); + /* Fix the name of the Type */ + ((PyTypeObject *)dtype_class)->tp_name = tp_name; + + /* Let Python finish the initialization (probably unnecessary) */ + if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { + return -1; + } + + /* + * Fill DTypeMeta information that varies between DTypes; any variable + * type information would need to be set before PyType_Ready(). 
+ */ + dtype_class->singleton = descr; + Py_INCREF(descr->typeobj); + dtype_class->scalar_type = descr->typeobj; + dtype_class->type_num = descr->type_num; + dtype_class->type = descr->type; + dtype_class->f = descr->f; + dtype_class->kind = descr->kind; + + if (PyTypeNum_ISDATETIME(descr->type_num)) { + /* Datetimes are flexible, but were not considered previously */ + dtype_class->parametric = NPY_TRUE; + } + else if (PyTypeNum_ISFLEXIBLE(descr->type_num)) { + dtype_class->parametric = NPY_TRUE; + } + + /* Finally, replace the current class of the descr */ + Py_SET_TYPE(descr, (PyTypeObject *)dtype_class); + + return 0; +} + + +/* + * Simple exposed information, defined for each DType (class). This is + * preliminary (the flags should also return bools). + */ +static PyMemberDef dtypemeta_members[] = { + {"_abstract", + T_BYTE, offsetof(PyArray_DTypeMeta, abstract), READONLY, NULL}, + {"type", + T_OBJECT, offsetof(PyArray_DTypeMeta, scalar_type), READONLY, NULL}, + {"_parametric", + T_BYTE, offsetof(PyArray_DTypeMeta, parametric), READONLY, NULL}, + {NULL, 0, 0, 0, NULL}, +}; + + +NPY_NO_EXPORT PyTypeObject PyArrayDTypeMeta_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "numpy._DTypeMeta", + .tp_basicsize = sizeof(PyArray_DTypeMeta), + .tp_dealloc = (destructor)dtypemeta_dealloc, + /* Types are garbage collected (see dtypemeta_is_gc documentation) */ + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, + .tp_doc = "Preliminary NumPy API: The Type of NumPy DTypes (metaclass)", + .tp_members = dtypemeta_members, + .tp_base = NULL, /* set to PyType_Type at import time */ + .tp_init = (initproc)dtypemeta_init, + .tp_new = dtypemeta_new, + .tp_is_gc = dtypemeta_is_gc, + .tp_traverse = (traverseproc)dtypemeta_traverse, +}; diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h new file mode 100644 index 000000000..97152d1ad --- /dev/null +++ b/numpy/core/src/multiarray/dtypemeta.h @@ -0,0 +1,7 @@ +#ifndef _NPY_DTYPEMETA_H +#define _NPY_DTYPEMETA_H + +NPY_NO_EXPORT int +dtypemeta_wrap_legacy_descriptor(PyArray_Descr *dtypem); + +#endif /*_NPY_DTYPEMETA_H */ diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 43dbde2f1..7aefbfc38 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -1655,11 +1655,12 @@ array_subscript(PyArrayObject *self, PyObject *op) goto finish; } - if (mit->numiter > 1) { + if (mit->numiter > 1 || mit->size == 0) { /* * If it is one, the inner loop checks indices, otherwise * check indices beforehand, because it is much faster if - * broadcasting occurs and most likely no big overhead + * broadcasting occurs and most likely no big overhead. + * The inner loop optimization skips index checks for size == 0 though. */ if (PyArray_MapIterCheckIndices(mit) < 0) { goto finish; @@ -2479,13 +2480,19 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) int i; NPY_BEGIN_THREADS_DEF; - if (mit->size == 0) { - /* All indices got broadcast away, do *not* check as it always was */ + intp_type = PyArray_DescrFromType(NPY_INTP); + + if (NpyIter_GetIterSize(mit->outer) == 0) { + /* + * When the outer iteration is empty, the indices broadcast to an + * empty shape, and in this case we do not check if there are out + * of bounds indices. + * The code below does use the indices without broadcasting since + * broadcasting only repeats values. 
+ */ return 0; } - intp_type = PyArray_DescrFromType(NPY_INTP); - NPY_BEGIN_THREADS; for (i=0; i < mit->numiter; i++) { @@ -2515,7 +2522,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) if (check_and_adjust_index(&indval, outer_dim, outer_axis, _save) < 0) { Py_DECREF(intp_type); - return -1; + goto indexing_error; } data += stride; } @@ -2528,13 +2535,17 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) op_iter = NpyIter_New(op, NPY_ITER_BUFFERED | NPY_ITER_NBO | NPY_ITER_ALIGNED | NPY_ITER_EXTERNAL_LOOP | NPY_ITER_GROWINNER | - NPY_ITER_READONLY, + NPY_ITER_READONLY | NPY_ITER_ZEROSIZE_OK, NPY_KEEPORDER, NPY_SAME_KIND_CASTING, intp_type); if (op_iter == NULL) { Py_DECREF(intp_type); return -1; } + if (NpyIter_GetIterSize(op_iter) == 0) { + NpyIter_Deallocate(op_iter); + continue; + } op_iternext = NpyIter_GetIterNext(op_iter, NULL); if (op_iternext == NULL) { @@ -2554,7 +2565,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) outer_dim, outer_axis, _save) < 0) { Py_DECREF(intp_type); NpyIter_Deallocate(op_iter); - return -1; + goto indexing_error; } *iterptr += *iterstride; } @@ -2567,6 +2578,32 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) NPY_END_THREADS; Py_DECREF(intp_type); return 0; + +indexing_error: + + if (mit->size == 0) { + PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; + PyErr_Fetch(&err_type, &err_value, &err_traceback); + /* 2020-05-27, NumPy 1.20 */ + if (DEPRECATE( + "Out of bound index found. This was previously ignored " + "when the indexing result contained no elements. " + "In the future the index error will be raised. This error " + "occurs either due to an empty slice, or if an array has zero " + "elements even before indexing.\n" + "(Use `warnings.simplefilter('error')` to turn this " + "DeprecationWarning into an error and get more details on " + "the invalid index.)") < 0) { + npy_PyErr_ChainExceptions(err_type, err_value, err_traceback); + return -1; + } + Py_DECREF(err_type); + Py_DECREF(err_value); + Py_XDECREF(err_traceback); + return 0; + } + + return -1; } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 6915371d8..84c22ba65 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1872,6 +1872,7 @@ array_empty_like(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) /* steals the reference to dtype if it's not NULL */ ret = (PyArrayObject *)PyArray_NewLikeArrayWithShape(prototype, order, dtype, shape.len, shape.ptr, subok); + npy_free_cache_dim_obj(shape); if (!ret) { goto fail; } @@ -2497,9 +2498,9 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) "subscript is not within the valid range [0, 52)"); Py_DECREF(obj); return -1; - } + } } - + } Py_DECREF(obj); @@ -4445,6 +4446,18 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (set_matmul_flags(d) < 0) { goto err; } + + PyArrayDTypeMeta_Type.tp_base = &PyType_Type; + if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { + goto err; + } + + PyArrayDescr_Type.tp_hash = PyArray_DescrHash; + Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); + if (PyType_Ready(&PyArrayDescr_Type) < 0) { + goto err; + } + initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { @@ -4478,10 +4491,6 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } - PyArrayDescr_Type.tp_hash = PyArray_DescrHash; - if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; - } if 
(PyType_Ready(&PyArrayFlags_Type) < 0) { goto err; } diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c index 8a7139fb2..f3c440dc6 100644 --- a/numpy/core/src/multiarray/scalarapi.c +++ b/numpy/core/src/multiarray/scalarapi.c @@ -755,7 +755,7 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) vobj->descr = descr; Py_INCREF(descr); vobj->obval = NULL; - Py_SIZE(vobj) = itemsize; + Py_SET_SIZE(vobj, itemsize); vobj->flags = NPY_ARRAY_CARRAY | NPY_ARRAY_F_CONTIGUOUS | NPY_ARRAY_OWNDATA; swap = 0; if (PyDataType_HASFIELDS(descr)) { diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index f13f50759..a7c3e847a 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -345,7 +345,7 @@ format_@name@(@type@ val, npy_bool scientific, * over-ride repr and str of array-scalar strings and unicode to * remove NULL bytes and then call the corresponding functions * of string and unicode. - * + * * FIXME: * is this really a good idea? * stop using Py_UNICODE here. @@ -1542,7 +1542,7 @@ static PyObject * return NULL; } #endif - + PyObject *tup; if (ndigits == Py_None) { tup = PyTuple_Pack(0); @@ -1568,7 +1568,7 @@ static PyObject * return ret; } #endif - + return obj; } /**end repeat**/ @@ -2774,7 +2774,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) return PyErr_NoMemory(); } ((PyVoidScalarObject *)ret)->obval = destptr; - Py_SIZE((PyVoidScalarObject *)ret) = (int) memu; + Py_SET_SIZE((PyVoidScalarObject *)ret, (int) memu); ((PyVoidScalarObject *)ret)->descr = PyArray_DescrNewFromType(NPY_VOID); ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu; diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index 997467b4d..bc320138d 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -37,6 +37,7 @@ maintainer email: oliphant.travis@ieee.org #include "npy_pycompat.h" #include "usertypes.h" +#include "dtypemeta.h" NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL; @@ -226,6 +227,11 @@ PyArray_RegisterDataType(PyArray_Descr *descr) return -1; } userdescrs[NPY_NUMUSERTYPES++] = descr; + + if (dtypemeta_wrap_legacy_descriptor(descr) < 0) { + return -1; + } + return typenum; } diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src index 651019a84..13e33d0a5 100644 --- a/numpy/core/src/umath/_rational_tests.c.src +++ b/numpy/core/src/umath/_rational_tests.c.src @@ -1158,7 +1158,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { npyrational_arrfuncs.fill = npyrational_fill; npyrational_arrfuncs.fillwithscalar = npyrational_fillwithscalar; /* Left undefined: scanfunc, fromstr, sort, argsort */ - Py_TYPE(&npyrational_descr) = &PyArrayDescr_Type; + Py_SET_TYPE(&npyrational_descr, &PyArrayDescr_Type); npy_rational = PyArray_RegisterDataType(&npyrational_descr); if (npy_rational<0) { goto fail; diff --git a/numpy/core/tests/test_cpu_features.py b/numpy/core/tests/test_cpu_features.py index 3b5cb3157..337b7330c 100644 --- a/numpy/core/tests/test_cpu_features.py +++ b/numpy/core/tests/test_cpu_features.py @@ -16,23 +16,36 @@ class AbstractTest(object): def test_features(self): self.load_flags() for gname, features in self.features_groups.items(): - test_features = [self.features_map.get(f, f) in self.features_flags for f in features] + test_features = [self.cpu_have(f) for f in features] 
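For illustration (a reviewer's note, not part of the patch): the ``__cpu_features__`` dict consumed by this test is exported by the C extension and can be inspected directly; a minimal sketch:

    from numpy.core._multiarray_umath import __cpu_features__

    # maps feature names to booleans; .get() returns None for unknown names
    __cpu_features__.get("ASIMD")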
assert_equal(__cpu_features__.get(gname), all(test_features)) for feature_name in self.features: - map_name = self.features_map.get(feature_name, feature_name) - cpu_have = map_name in self.features_flags + cpu_have = self.cpu_have(feature_name) npy_have = __cpu_features__.get(feature_name) assert_equal(npy_have, cpu_have) - def load_flags_proc(self, magic_key): + def cpu_have(self, feature_name): + map_names = self.features_map.get(feature_name, feature_name) + if isinstance(map_names, str): + return map_names in self.features_flags + for f in map_names: + if f in self.features_flags: + return True + return False + + def load_flags_cpuinfo(self, magic_key): + self.features_flags = self.get_cpuinfo_item(magic_key) + + def get_cpuinfo_item(self, magic_key): + values = set() with open('/proc/cpuinfo') as fd: for line in fd: if not line.startswith(magic_key): continue flags_value = [s.strip() for s in line.split(':', 1)] if len(flags_value) == 2: - self.features_flags = self.features_flags.union(flags_value[1].upper().split()) + values = values.union(flags_value[1].upper().split()) + return values def load_flags_auxv(self): import subprocess @@ -75,7 +88,7 @@ class Test_X86_Features(AbstractTest): AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ", ) def load_flags(self): - self.load_flags_proc("flags") + self.load_flags_cpuinfo("flags") is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") @@ -97,8 +110,18 @@ class Test_ARM_Features(AbstractTest): NEON_VFPV4 = ["NEON", "VFPV4"], ) def load_flags(self): - self.load_flags_proc("Features") - if re.match("^(aarch64|AARCH64)", platform.machine()): + self.load_flags_cpuinfo("Features") + arch = self.get_cpuinfo_item("CPU architecture") + # in case the virtual filesystem of an aarch64 kernel is mounted + is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0 + if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: self.features_map = dict( NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD" ) + else: + self.features_map = dict( + # The ELF auxiliary vector and /proc/cpuinfo on a Linux kernel (armv8 aarch32) + # don't provide information about ASIMD, so we assume that ASIMD is supported + # if the kernel reports any one of the following ARMv8 features. + ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32") + ) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 82d24e0f7..523638a35 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -618,3 +618,30 @@ class BuiltInRoundComplexDType(_DeprecationTestCase): self.assert_not_deprecated(round, args=(scalar,)) self.assert_not_deprecated(round, args=(scalar, 0)) self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + +class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase): + # 2020-05-27, NumPy 1.20.0 + message = "Out of bound index found. This was previously ignored.*" + + @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])]) + def test_empty_subspace(self, index): + # Test for both a single and two/multiple advanced indices. These + # will raise an IndexError in the future. 
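For illustration (a reviewer's note, not part of the patch), the first parametrized case behaves like:

    import numpy as np

    arr = np.ones((2, 2, 0))
    # result is empty, but index 3 is out of bounds for axis 0 of length 2:
    arr[[3, 0]]   # DeprecationWarning: Out of bound index found. ...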
+ arr = np.ones((2, 2, 0)) + self.assert_deprecated(arr.__getitem__, args=(index,)) + self.assert_deprecated(arr.__setitem__, args=(index, 0.)) + + # for this array, the subspace is only empty after applying the slice + arr2 = np.ones((2, 2, 1)) + index2 = (slice(0, 0),) + index + self.assert_deprecated(arr2.__getitem__, args=(index2,)) + self.assert_deprecated(arr2.__setitem__, args=(index2, 0.)) + + def test_empty_index_broadcast_not_deprecated(self): + arr = np.ones((2, 2, 2)) + + index = ([[3], [2]], []) # broadcast to an empty result. + self.assert_not_deprecated(arr.__getitem__, args=(index,)) + self.assert_not_deprecated(arr.__setitem__, + args=(index, np.empty((2, 0, 2)))) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index c9a65cd9c..73aa01de6 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1091,6 +1091,40 @@ class TestFromDTypeAttribute: with pytest.raises(RecursionError): np.dtype(dt(1)) + +class TestDTypeClasses: + @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational]) + def test_basic_dtypes_subclass_properties(self, dtype): + # Note: Except for the isinstance and type checks, these attributes + # are considered currently private and may change. + dtype = np.dtype(dtype) + assert isinstance(dtype, np.dtype) + assert type(dtype) is not np.dtype + assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]" + assert type(dtype).__module__ == "numpy" + assert not type(dtype)._abstract + + # the flexible dtypes and datetime/timedelta have additional parameters + # which are more than just storage information, these would need to be + # given when creating a dtype: + parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64) + if dtype.type not in parametric: + assert not type(dtype)._parametric + assert type(dtype)() is dtype + else: + assert type(dtype)._parametric + with assert_raises(TypeError): + type(dtype)() + + def test_dtype_superclass(self): + assert type(np.dtype) is not type + assert isinstance(np.dtype, type) + + assert type(np.dtype).__name__ == "_DTypeMeta" + assert type(np.dtype).__module__ == "numpy" + assert np.dtype._abstract + + class TestFromCTypes: @staticmethod diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index ab5ec266e..1a8268eb8 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -845,6 +845,13 @@ class TestCreation: assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) + @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8, + reason="malloc may not fail on 32 bit systems") + def test_malloc_fails(self): + # This test is guaranteed to fail due to a too large allocation + with assert_raises(np.core._exceptions._ArrayMemoryError): + np.empty(np.iinfo(np.intp).max, dtype=np.uint8) + def test_zeros(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index acd442e2f..2a87ffaf8 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -31,6 +31,17 @@ class TestResize: Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) assert_equal(np.resize(A, (4, 3)), Ar3) + def test_repeats(self): + A = np.array([1, 2, 3]) + Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]]) + 
assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + def test_zeroresize(self): A = np.array([[1, 2], [3, 4]]) Ar = np.resize(A, (0,)) @@ -50,6 +61,23 @@ assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) assert_equal(A.dtype, Ar.dtype) + def test_negative_resize(self): + A = np.arange(0, 10, dtype=np.float32) + new_shape = (-10, -1) + with pytest.raises(ValueError, match=r"negative"): + np.resize(A, new_shape=new_shape) + + def test_subclass(self): + class MyArray(np.ndarray): + __array_priority__ = 1. + + my_arr = np.array([1]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + assert type(np.resize(my_arr, 0)) is MyArray + + my_arr = np.array([]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + class TestNonarrayArgs: # check that non-array arguments to functions wrap them in arrays diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 6750bf705..ecfc71ae3 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -194,7 +194,7 @@ PyMODINIT_FUNC PyInit_#modulename#(void) { \tint i; \tPyObject *m,*d, *s, *tmp; \tm = #modulename#_module = PyModule_Create(&moduledef); -\tPy_TYPE(&PyFortran_Type) = &PyType_Type; +\tPy_SET_TYPE(&PyFortran_Type, &PyType_Type); \timport_array(); \tif (PyErr_Occurred()) \t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index 83c0da2cf..0db33e714 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -144,7 +144,7 @@ static struct PyModuleDef moduledef = { PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { PyObject *m,*d, *s; m = wrap_module = PyModule_Create(&moduledef); - Py_TYPE(&PyFortran_Type) = &PyType_Type; + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); import_array(); if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap (failed to import numpy)"); diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 48b0a0830..7a23aeab7 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1334,7 +1334,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): See Also -------- - scipy.interpolate + scipy.interpolate Notes ----- @@ -3273,10 +3273,17 @@ def _sinc_dispatcher(x): @array_function_dispatch(_sinc_dispatcher) def sinc(x): - """ - Return the sinc function. + r""" + Return the normalized sinc function. + + The sinc function is :math:`\sin(\pi x)/(\pi x)`. + + .. note:: - The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. + Note the normalization factor of ``pi`` used in the definition. + This is the most commonly used definition in signal processing. + Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function + :math:`\sin(x)/x`, which is more common in mathematics. Parameters ---------- diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 99d119362..664bfe6e5 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1824,16 +1824,11 @@ M 33 21.99 data[10 * i] = "2, 2, 2, 2 2" data.insert(0, "a, b, c, d, e") mdata = TextIO("\n".join(data)) - # + kwargs = dict(delimiter=",", dtype=None, names=True) - # XXX: is there a better way to get the return value of the - # callable in assert_warns ? 
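For illustration (a reviewer's note, not part of the patch): the rewrite below leans on ``assert_warns`` passing through the callable's return value, which the ``utils.py`` docstring change later in this diff documents. A standalone sketch:

    import warnings
    from numpy.testing import assert_warns

    def f():
        warnings.warn("deprecated", UserWarning)
        return 42

    # checks the warning is emitted and returns f()'s value
    assert assert_warns(UserWarning, f) == 42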
- ret = {} - - def f(_ret={}): - _ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs) - assert_warns(ConversionWarning, f, _ret=ret) - mtest = ret['mtest'] + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = assert_warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # @@ -1848,16 +1843,12 @@ M 33 21.99 data[10 * i] = "2, 2, 2, 2 2" data.insert(0, "a, b, c, d, e") mdata = TextIO("\n".join(data)) + kwargs = dict(delimiter=",", dtype=None, names=True, invalid_raise=False) - # XXX: is there a better way to get the return value of the - # callable in assert_warns ? - ret = {} - - def f(_ret={}): - _ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - assert_warns(ConversionWarning, f, _ret=ret) - mtest = ret['mtest'] + def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = assert_warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4097a6738..f8789af90 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1768,12 +1768,10 @@ def assert_warns(warning_class, *args, **kwargs): ---------- warning_class : class The class defining the warning that `func` is expected to throw. - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. + \\*args : List of function and arguments + `func` and arguments for `func`. \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. + Keyword arguments for `func`. Returns ------- diff --git a/test_requirements.txt b/test_requirements.txt index 607fabe1e..ffb27d7ec 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,8 +1,8 @@ -cython==0.29.17 -hypothesis==5.14.0 +cython==0.29.19 +hypothesis==5.15.1 pytest==5.4.2 pytz==2020.1 -pytest-cov==2.8.1 +pytest-cov==2.9.0 pickle5; python_version == '3.7' pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending diff --git a/tools/download-wheels.py b/tools/download-wheels.py index 1e26e0e63..941440ca9 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -16,7 +16,7 @@ __version__ = '0.1' # Edit these for other projects. STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/numpy' -PREFIX = '^.*numpy-' +PREFIX = 'numpy' def get_wheel_names(version): """ Get wheel names from Anaconda HTML directory. 
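For illustration (a reviewer's note, not part of the patch): the pattern tightened in the next hunk anchors the version between dashes, which — on my reading — keeps pre-release wheels from matching a final-release query. A quick standalone check (version string illustrative):

    import re

    tmpl = re.compile(r"^.*numpy-1.19.0-.*\.whl$")
    assert tmpl.match("numpy-1.19.0-cp38-cp38-manylinux1_x86_64.whl")
    # without the trailing '-', 1.19.0rc1 wheels would also have matched
    assert not tmpl.match("numpy-1.19.0rc1-cp38-cp38-manylinux1_x86_64.whl")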
@@ -31,8 +31,8 @@ def get_wheel_names(version): """ http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') - tmpl = re.compile(rf"{PREFIX}{version}.*\.whl$") - index_url = f"{STAGING_URL}/files" + tmpl = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$") + index_url = f"{STAGING_URL}/files" index_html = http.request('GET', index_url) soup = BeautifulSoup(index_html.data, 'html.parser') return soup.findAll(text=tmpl) @@ -60,7 +60,7 @@ def download_wheels(version, wheelhouse): wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, 'wb') as f: with http.request('GET', wheel_url, preload_content=False,) as r: - print(f"Downloading wheel {i + 1}, name: {wheel_name}") + print(f"{i + 1:<4}{wheel_name}") shutil.copyfileobj(r, f) print(f"\nTotal files downloaded: {len(wheel_names)}") diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 6b2ad0f8c..cbb6a5e43 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -207,12 +207,12 @@ def make_init(dirname): and is created as part of the scripts that build the wheel. ''' import os - from ctypes import WinDLL import glob if os.name == 'nt': # convention for storing / loading the DLL from # numpy/.libs/, if present try: + from ctypes import WinDLL basedir = os.path.dirname(__file__) except: pass @@ -221,16 +221,16 @@ def make_init(dirname): DLL_filenames = [] if os.path.isdir(libs_dir): for filename in glob.glob(os.path.join(libs_dir, - '*openblas*dll')): + '*openblas*dll')): # NOTE: would it change behavior to load ALL # DLLs at this path vs. the name restriction? WinDLL(os.path.abspath(filename)) DLL_filenames.append(filename) - if len(DLL_filenames) > 1: - import warnings - warnings.warn("loaded more than 1 DLL from .libs:\\n%s" % - "\\n".join(DLL_filenames), - stacklevel=1) + if len(DLL_filenames) > 1: + import warnings + warnings.warn("loaded more than 1 DLL from .libs:\\n%s" % + "\\n".join(DLL_filenames), + stacklevel=1) """)) def test_setup(arches): diff --git a/tools/travis-upload-wheel.sh b/tools/travis-upload-wheel.sh deleted file mode 100755 index 06a8f3eba..000000000 --- a/tools/travis-upload-wheel.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# -set -ex - -export CLOUD_CONTAINER_NAME=travis-dev-wheels - -if [[ ( ${USE_WHEEL} == 1 ) \ - && ( "${TRAVIS_BRANCH}" == "master" ) \ - && ( "${TRAVIS_PULL_REQUEST}" == "false" ) ]]; then - pip install wheelhouse_uploader - python -m wheelhouse_uploader upload --local-folder \ - ${TRAVIS_BUILD_DIR}/dist/ ${CLOUD_CONTAINER_NAME} -fi