66 files changed, 1473 insertions, 1347 deletions
diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml new file mode 100644 index 000000000..de02ac6d3 --- /dev/null +++ b/.github/workflows/circleci.yml @@ -0,0 +1,12 @@ +on: [status] +jobs: + circleci_artifacts_redirector_job: + runs-on: ubuntu-latest + name: Run CircleCI artifacts redirector + steps: + - name: GitHub Action step + uses: larsoner/circleci-artifacts-redirector-action@master + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + artifact-path: 0/doc/build/html/index.html + circleci-jobs: build diff --git a/.travis.yml b/.travis.yml index d51a1d223..85f6127cd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,16 +27,6 @@ stages: # Do the rest of the tests - name: Comprehensive tests -env: - global: - - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy - # The following is generated with the command: - # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY - - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9Tr\ - XrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPow\ - iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\ - ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" - jobs: include: # Do all python versions without environment variables set @@ -46,6 +36,7 @@ jobs: - stage: Comprehensive tests python: 3.6 - python: 3.7 + - python: 3.9-dev - python: 3.6 env: USE_DEBUG=1 @@ -70,8 +61,8 @@ jobs: - BLAS=None - LAPACK=None - ATLAS=None - - NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas - - NPY_LAPACK_ORDER=MKL,OPENBLAS,ATLAS,ACCELERATE,LAPACK + - NPY_BLAS_ORDER=mkl,blis,openblas,atlas,blas + - NPY_LAPACK_ORDER=MKL,OPENBLAS,ATLAS,LAPACK - USE_ASV=1 - python: 3.7 @@ -133,6 +124,3 @@ before_install: script: - ./tools/travis-test.sh - -after_success: - - ./tools/travis-upload-wheel.sh diff --git a/MANIFEST.in b/MANIFEST.in index b58f85d4d..f710c92e6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -21,7 +21,6 @@ include numpy/__init__.pxd # Note that sub-directories that don't have __init__ are apparently not # included by 'recursive-include', so list those separately recursive-include numpy * -recursive-include numpy/_build_utils * recursive-include numpy/linalg/lapack_lite * recursive-include tools * # Add sdist files whose use depends on local configuration. diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 564f5d8e8..ea2b414b0 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -75,10 +75,13 @@ stages: PYTHON_VERSION: '3.6' NPY_USE_BLAS_ILP64: '1' USE_OPENBLAS: '1' - USE_XCODE_10: '1' - Accelerate: - PYTHON_VERSION: '3.6' - USE_OPENBLAS: '0' + # Disable this job: the azure images do not create the problematic + # symlink from Accelerate to OpenBLAS. We still have the test + # at import to detect a buggy Accelerate, just cannot easily trigger + # it with azure. + # Accelerate: + # PYTHON_VERSION: '3.6' + # USE_OPENBLAS: '0' steps: # the @0 refers to the (major) version of the *task* on Microsoft's @@ -145,7 +148,6 @@ stages: BLAS: None LAPACK: None ATLAS: None - ACCELERATE: None CC: /usr/bin/clang condition: eq(variables['USE_OPENBLAS'], '1') - script: python setup.py build -j 4 build_ext --inplace install diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index af47fe99c..4ab39c586 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -12,7 +12,7 @@ the `pytest`_ framework. The older framework is still maintained in order to support downstream projects that use the old numpy framework, but all tests for NumPy should use pytest. 
-Our goal is that every module and package in SciPy and NumPy +Our goal is that every module and package in NumPy should have a thorough set of unit tests. These tests should exercise the full functionality of a given routine as well as its robustness to erroneous or unexpected input @@ -28,26 +28,30 @@ is found in a routine, you should write a new test for that specific case and add it to the test suite to prevent that bug from creeping back in unnoticed. -To run SciPy's full test suite, use the following:: +.. note:: - >>> import scipy - >>> scipy.test() + SciPy uses the testing framework from :mod:`numpy.testing`, + so all of the NumPy examples shown below are also applicable to SciPy. -or from the command line:: +Testing NumPy +''''''''''''' - $ python runtests.py +NumPy can be tested in a number of ways; choose any way you feel comfortable with. + +Running tests from inside Python +-------------------------------- -SciPy uses the testing framework from :mod:`numpy.testing`, so all -the SciPy examples shown here are also applicable to NumPy. NumPy's full test -suite can be run as follows:: +You can test an installed NumPy with `numpy.test`; for example, to run all of the tests labeled as slow, use the following:: >>> import numpy - >>> numpy.test() + >>> numpy.test(label='slow') -The test method may take two or more arguments; the first, ``label`` is a -string specifying what should be tested and the second, ``verbose`` is an -integer giving the level of output verbosity. See the docstring for -numpy.test for details. The default value for ``label`` is 'fast' - which +The test method may take two or more arguments; the first, ``label``, is a +string specifying what should be tested and the second, ``verbose``, is an +integer giving the level of output verbosity. See the docstring of +`numpy.test` +for details. The default value for ``label`` is 'fast' - which will run the standard tests. The string 'full' will run the full battery of tests, including those identified as being slow to run. If ``verbose`` is 1 or less, the tests will just show information messages about the tests @@ -55,38 +59,43 @@ that are run; but if it is greater than 1, then the tests will also provide warnings on missing tests. So if you want to run every test and get messages about which modules don't have tests:: - >>> scipy.test(label='full', verbose=2) # or scipy.test('full', 2) + >>> numpy.test(label='full', verbose=2) # or numpy.test('full', 2) -Finally, if you are only interested in testing a subset of SciPy, for -example, the ``integrate`` module, use the following:: +Finally, if you are only interested in testing a subset of NumPy, for +example, the ``core`` module, use the following:: - >>> scipy.integrate.test() + >>> numpy.core.test() -or from the command line:: +Running tests from the command line +----------------------------------- - $python runtests.py -t scipy/integrate/tests +If you want to build NumPy in order to work on NumPy itself, use +``runtests.py``. To run NumPy's full test suite:: -The rest of this page will give you a basic idea of how to add unit -tests to modules in SciPy. It is extremely important for us to have -extensive unit testing since this code is going to be used by -scientists and researchers and is being developed by a large number of -people spread across the world. So, if you are writing a package that -you'd like to become part of SciPy, please write the tests as you -develop the package.
Also since much of SciPy is legacy code that was -originally written without unit tests, there are still several modules -that don't have tests yet. Please feel free to choose one of these -modules and develop tests for it as you read through -this introduction. + $ python runtests.py + +Testing a subset of NumPy:: + + $ python runtests.py -t numpy/core/tests + +For detailed info on testing, see :ref:`testing-builds`. + +Other methods of running tests +------------------------------ + +Run tests using your favourite IDE, such as `vscode`_ or `pycharm`_. Writing your own tests '''''''''''''''''''''' -Every Python module, extension module, or subpackage in the SciPy +If you are writing a package that you'd like to become part of NumPy, +please write the tests as you develop the package. +Every Python module, extension module, or subpackage in the NumPy package directory should have a corresponding ``test_<name>.py`` file. -Pytest examines these files for test methods (named test*) and test -classes (named Test*). +Pytest examines these files for test methods (named ``test*``) and test +classes (named ``Test*``). -Suppose you have a SciPy module ``scipy/xxx/yyy.py`` containing a +Suppose you have a NumPy module ``numpy/xxx/yyy.py`` containing a function ``zzz()``. To test this function you would create a test module called ``test_yyy.py``. If you only need to test one aspect of ``zzz``, you can simply add a test function:: @@ -100,7 +109,7 @@ a test class:: from numpy.testing import assert_, assert_raises # import xxx symbols - from scipy.xxx.yyy import zzz + from numpy.xxx.yyy import zzz class TestZzz: def test_simple(self): @@ -119,6 +128,11 @@ that makes it hard to identify the test from the output of running the test suite with ``verbose=2`` (or similar verbosity setting). Use plain comments (``#``) if necessary. +Also since much of NumPy is legacy code that was +originally written without unit tests, there are still several modules +that don't have tests yet. Please feel free to choose one of these +modules and develop tests for it. + Labeling tests -------------- @@ -126,8 +140,8 @@ As an alternative to ``pytest.mark.<label>``, there are a number of labels you can use. Unlabeled tests like the ones above are run in the default -``scipy.test()`` run. If you want to label your test as slow - and -therefore reserved for a full ``scipy.test(label='full')`` run, you +``numpy.test()`` run. If you want to label your test as slow - and +therefore reserved for a full ``numpy.test(label='full')`` run, you can label it with a decorator:: # numpy.testing module includes 'import decorators as dec' @@ -211,10 +225,10 @@ for numpy.lib:: >>> np.lib.test(doctests=True) The doctests are run as if they are in a fresh Python instance which -has executed ``import numpy as np``. Tests that are part of a SciPy +has executed ``import numpy as np``. Tests that are part of a NumPy subpackage will have that subpackage already imported. E.g. for a test -in ``scipy/linalg/tests/``, the namespace will be created such that -``from scipy import linalg`` has already executed. +in ``numpy/linalg/tests/``, the namespace will be created such that +``from numpy import linalg`` has already executed. ``tests/`` @@ -223,15 +237,15 @@ in ``scipy/linalg/tests/``, the namespace will be created such that Rather than keeping the code and the tests in the same directory, we put all the tests for a given subpackage in a ``tests/`` subdirectory.
For our example, if it doesn't already exist you will -need to create a ``tests/`` directory in ``scipy/xxx/``. So the path -for ``test_yyy.py`` is ``scipy/xxx/tests/test_yyy.py``. +need to create a ``tests/`` directory in ``numpy/xxx/``. So the path +for ``test_yyy.py`` is ``numpy/xxx/tests/test_yyy.py``. -Once the ``scipy/xxx/tests/test_yyy.py`` is written, its possible to +Once the ``numpy/xxx/tests/test_yyy.py`` is written, it's possible to run the tests by going to the ``tests/`` directory and typing:: python test_yyy.py -Or if you add ``scipy/xxx/tests/`` to the Python path, you could run +Or if you add ``numpy/xxx/tests/`` to the Python path, you could run the tests interactively in the interpreter like this:: >>> import test_yyy @@ -262,14 +276,14 @@ section of your setup.py:: Now you can do the following to test your module:: - >>> import scipy - >>> scipy.xxx.test() + >>> import numpy + >>> numpy.xxx.test() -Also, when invoking the entire SciPy test suite, your tests will be +Also, when invoking the entire NumPy test suite, your tests will be found and run:: - >>> import scipy - >>> scipy.test() + >>> import numpy + >>> numpy.test() # your tests are included and run automatically! Tips & Tricks @@ -370,7 +384,14 @@ failures without requiring a fixed seed, reporting *minimal* examples for each failure, and better-than-naive-random techniques for triggering bugs. +Documentation for ``numpy.test`` +-------------------------------- + +.. autofunction:: numpy.test + .. _nose: https://nose.readthedocs.io/en/latest/ .. _pytest: https://pytest.readthedocs.io .. _parameterization: https://docs.pytest.org/en/latest/parametrize.html .. _Hypothesis: https://hypothesis.readthedocs.io/en/latest/ +.. _vscode: https://code.visualstudio.com/docs/python/testing#_enable-a-test-framework +.. _pycharm: https://www.jetbrains.com/help/pycharm/testing-your-first-python-application.html diff --git a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst index 077166453..9ece915a7 100644 --- a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst +++ b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst @@ -23,7 +23,7 @@ Detailed description -------------------- Traditionally, NumPy’s ``ndarray`` objects have provided two things: a -high level API for expression operations on homogenously-typed, +high level API for expressing operations on homogeneously-typed, arbitrary-dimensional, array-structured data, and a concrete implementation of the API based on strided in-RAM storage. The API is powerful, fairly general, and used ubiquitously across the scientific diff --git a/doc/release/upcoming_changes/15759.improvement.rst b/doc/release/upcoming_changes/15759.improvement.rst new file mode 100644 index 000000000..0a1b255f7 --- /dev/null +++ b/doc/release/upcoming_changes/15759.improvement.rst @@ -0,0 +1,4 @@ +Remove the Accelerate library as a candidate LAPACK library +----------------------------------------------------------- +Apple no longer supports Accelerate. Remove it.
+ diff --git a/doc/release/upcoming_changes/15997.improvement.rst b/doc/release/upcoming_changes/15997.improvement.rst new file mode 100644 index 000000000..9b5feacb8 --- /dev/null +++ b/doc/release/upcoming_changes/15997.improvement.rst @@ -0,0 +1,12 @@ +Object arrays containing multi-line objects have a more readable ``repr`` +------------------------------------------------------------------------- +If elements of an object array have a ``repr`` containing new lines, then the +wrapped lines will be aligned by column. Notably, this improves the ``repr`` of +nested arrays:: + + >>> np.array([np.eye(2), np.eye(3)], dtype=object) + array([array([[1., 0.], + [0., 1.]]), + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]])], dtype=object) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 6b6f366df..6c8793342 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -326,9 +326,8 @@ NumPy provides several hooks that classes can customize: If a class (ndarray subclass or not) having the :func:`__array__` method is used as the output object of an :ref:`ufunc - <ufuncs-output-type>`, results will be written to the object - returned by :func:`__array__`. Similar conversion is done on - input arrays. + <ufuncs-output-type>`, results will *not* be written to the object + returned by :func:`__array__`. This practice will raise a ``TypeError``. Matrix objects diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index f36a083aa..4e95535c0 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -71,7 +71,7 @@ This approach to the interface consists of the object having an **typestr** (required) - A string providing the basic type of the homogenous array The + A string providing the basic type of the homogeneous array. The basic string format consists of 3 parts: a character describing the byteorder of the data (``<``: little-endian, ``>``: big-endian, ``|``: not-relevant), a character code giving the diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index faa91a389..497dd9cd6 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -11,7 +11,7 @@ NumPy provides an N-dimensional array type, the :ref:`ndarray type. The items can be :ref:`indexed <arrays.indexing>` using for example N integers. -All ndarrays are :term:`homogenous`: every item takes up the same size +All ndarrays are :term:`homogeneous`: every item takes up the same size block of memory, and all blocks are interpreted in exactly the same way. How each item in the array is to be interpreted is specified by a separate :ref:`data-type object <arrays.dtypes>`, one of which is associated diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index ad2cd2d80..bd44b70da 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -100,8 +100,8 @@ What’s the difference between a Python list and a NumPy array? NumPy gives you an enormous range of fast and efficient ways of creating arrays and manipulating numerical data inside them. While a Python list can contain different data types within a single list, all of the elements in a NumPy array -should be homogenous. The mathematical operations that are meant to be performed -on arrays would be extremely inefficient if the arrays weren't homogenous. +should be homogeneous.
The mathematical operations that are meant to be performed +on arrays would be extremely inefficient if the arrays weren't homogeneous. **Why use NumPy?** diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index 546fd7daf..47a0a03c9 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -123,8 +123,7 @@ The default order for the libraries are: 2. BLIS 3. OpenBLAS 4. ATLAS -5. Accelerate (MacOS) -6. BLAS (NetLIB) +5. BLAS (NetLIB) If you wish to build against OpenBLAS but you also have BLIS available one may predefine the order of searching via the environment variable @@ -146,8 +145,7 @@ The default order for the libraries are: 2. OpenBLAS 3. libFLAME 4. ATLAS -5. Accelerate (MacOS) -6. LAPACK (NetLIB) +5. LAPACK (NetLIB) If you wish to build against OpenBLAS but you also have MKL available one diff --git a/doc/sphinxext b/doc/sphinxext -Subproject a482f66913c1079d7439770f0119b55376bb1b8 +Subproject b4c5fd17e2b85c2416a5e586933eee353b58bf7 diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index d5c50d9bf..338da3d7e 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -26,6 +26,7 @@ cimport libc.stdio as stdio cdef extern from "Python.h": ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) cdef extern from "numpy/arrayobject.h": ctypedef Py_intptr_t npy_intp @@ -220,7 +221,7 @@ cdef extern from "numpy/arrayobject.h": cdef int type_num cdef int itemsize "elsize" cdef int alignment - cdef dict fields + cdef object fields cdef tuple names # Use PyDataType_HASSUBARRAY to test whether this field is # valid (the pointer can be NULL). Most users should access @@ -828,6 +829,45 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset return f +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + +cdef extern from "numpy/arrayscalars.h": + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + + # # ufunc API # @@ -976,3 +1016,57 @@ cdef extern from *: """ /* NumPy API declarations from "numpy/__init__.pxd" */ """ + + +cdef inline bint is_timedelta64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. 
+ """ + return (<PyDatetimeScalarObject*>obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (<PyTimedeltaScalarObject*>obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. + """ + return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base diff --git a/numpy/_build_utils/README b/numpy/_build_utils/README deleted file mode 100644 index 6976e0233..000000000 --- a/numpy/_build_utils/README +++ /dev/null @@ -1,8 +0,0 @@ -======= -WARNING -======= - -This directory (numpy/_build_utils) is *not* part of the public numpy API, - - it is internal build support for numpy. - - it is only present in source distributions or during an in place build - - it is *not* installed with the rest of numpy diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 --- a/numpy/_build_utils/__init__.py +++ /dev/null diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py deleted file mode 100644 index b26aa12ad..000000000 --- a/numpy/_build_utils/apple_accelerate.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import sys -import re - -__all__ = ['uses_accelerate_framework', 'get_sgemv_fix'] - -def uses_accelerate_framework(info): - """ Returns True if Accelerate framework is used for BLAS/LAPACK """ - # If we're not building on Darwin (macOS), don't use Accelerate - if sys.platform != "darwin": - return False - # If we're building on macOS, but targeting a different platform, - # don't use Accelerate. - if os.getenv('_PYTHON_HOST_PLATFORM', None): - return False - r_accelerate = re.compile("Accelerate") - extra_link_args = info.get('extra_link_args', '') - for arg in extra_link_args: - if r_accelerate.search(arg): - return True - return False - -def get_sgemv_fix(): - """ Returns source file needed to correct SGEMV """ - path = os.path.abspath(os.path.dirname(__file__)) - return [os.path.join(path, 'src', 'apple_sgemv_fix.c')] diff --git a/numpy/_build_utils/src/apple_sgemv_fix.c b/numpy/_build_utils/src/apple_sgemv_fix.c deleted file mode 100644 index b1dbeb681..000000000 --- a/numpy/_build_utils/src/apple_sgemv_fix.c +++ /dev/null @@ -1,253 +0,0 @@ -/* This is a collection of ugly hacks to circumvent a bug in - * Apple Accelerate framework's SGEMV subroutine. - * - * See: https://github.com/numpy/numpy/issues/4007 - * - * SGEMV in Accelerate framework will segfault on MacOS X version 10.9 - * (aka Mavericks) if arrays are not aligned to 32 byte boundaries - * and the CPU supports AVX instructions. This can produce segfaults - * in np.dot. - * - * This patch overshadows the symbols cblas_sgemv, sgemv_ and sgemv - * exported by Accelerate to produce the correct behavior. The MacOS X - * version and CPU specs are checked on module import. If Mavericks and - * AVX are detected the call to SGEMV is emulated with a call to SGEMM - * if the arrays are not 32 byte aligned. If the exported symbols cannot - * be overshadowed on module import, a fatal error is produced and the - * process aborts. All the fixes are in a self-contained C file - * and do not alter the multiarray C code. The patch is not applied - * unless NumPy is configured to link with Apple's Accelerate - * framework. 
- * - */ - -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" -#include "numpy/arrayobject.h" - -#include <string.h> -#include <dlfcn.h> -#include <stdlib.h> -#include <stdio.h> -#include <sys/types.h> -#include <sys/sysctl.h> -#include <string.h> - -/* ----------------------------------------------------------------- */ -/* Original cblas_sgemv */ - -#define VECLIB_FILE "/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/vecLib" - -enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102}; -enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113}; -extern void cblas_xerbla(int info, const char *rout, const char *form, ...); - -typedef void cblas_sgemv_t(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *X, const int incX, - const float beta, float *Y, const int incY); - -typedef void cblas_sgemm_t(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, - const int M, const int N, const int K, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, - const float beta, float *C, const int incC); - -typedef void fortran_sgemv_t( const char* trans, const int* m, const int* n, - const float* alpha, const float* A, const int* ldA, - const float* X, const int* incX, - const float* beta, float* Y, const int* incY ); - -static void *veclib = NULL; -static cblas_sgemv_t *accelerate_cblas_sgemv = NULL; -static cblas_sgemm_t *accelerate_cblas_sgemm = NULL; -static fortran_sgemv_t *accelerate_sgemv = NULL; -static int AVX_and_10_9 = 0; - -/* Dynamic check for AVX support - * __builtin_cpu_supports("avx") is available in gcc 4.8, - * but clang and icc do not currently support it. 
*/ -static inline int -cpu_supports_avx() -{ - int enabled, r; - size_t length = sizeof(enabled); - r = sysctlbyname("hw.optional.avx1_0", &enabled, &length, NULL, 0); - if ( r == 0 && enabled != 0) { - return 1; - } - else { - return 0; - } -} - -/* Check if we are using MacOS X version 10.9 */ -static inline int -using_mavericks() -{ - int r; - char str[32] = {0}; - size_t size = sizeof(str); - r = sysctlbyname("kern.osproductversion", str, &size, NULL, 0); - if ( r == 0 && strncmp(str, "10.9", strlen("10.9")) == 0) { - return 1; - } - else { - return 0; - } -} - -__attribute__((destructor)) -static void unloadlib(void) -{ - if (veclib) dlclose(veclib); -} - -__attribute__((constructor)) -static void loadlib() -/* automatically executed on module import */ -{ - char errormsg[1024]; - int AVX, MAVERICKS; - memset((void*)errormsg, 0, sizeof(errormsg)); - /* check if the CPU supports AVX */ - AVX = cpu_supports_avx(); - /* check if the OS is MacOS X Mavericks */ - MAVERICKS = using_mavericks(); - /* we need the workaround when the CPU supports - * AVX and the OS version is Mavericks */ - AVX_and_10_9 = AVX && MAVERICKS; - /* load vecLib */ - veclib = dlopen(VECLIB_FILE, RTLD_LOCAL | RTLD_FIRST); - if (!veclib) { - veclib = NULL; - snprintf(errormsg, sizeof(errormsg), - "Failed to open vecLib from location '%s'.", VECLIB_FILE); - Py_FatalError(errormsg); /* calls abort() and dumps core */ - } - /* resolve Fortran SGEMV from Accelerate */ - accelerate_sgemv = (fortran_sgemv_t*) dlsym(veclib, "sgemv_"); - if (!accelerate_sgemv) { - unloadlib(); - Py_FatalError("Failed to resolve symbol 'sgemv_'."); - } - /* resolve cblas_sgemv from Accelerate */ - accelerate_cblas_sgemv = (cblas_sgemv_t*) dlsym(veclib, "cblas_sgemv"); - if (!accelerate_cblas_sgemv) { - unloadlib(); - Py_FatalError("Failed to resolve symbol 'cblas_sgemv'."); - } - /* resolve cblas_sgemm from Accelerate */ - accelerate_cblas_sgemm = (cblas_sgemm_t*) dlsym(veclib, "cblas_sgemm"); - if (!accelerate_cblas_sgemm) { - unloadlib(); - Py_FatalError("Failed to resolve symbol 'cblas_sgemm'."); - } -} - -/* ----------------------------------------------------------------- */ -/* Fortran SGEMV override */ - -void sgemv_( const char* trans, const int* m, const int* n, - const float* alpha, const float* A, const int* ldA, - const float* X, const int* incX, - const float* beta, float* Y, const int* incY ) -{ - /* It is safe to use the original SGEMV if we are not using AVX on Mavericks - * or the input arrays A, X and Y are all aligned on 32 byte boundaries. */ - #define BADARRAY(x) (((npy_intp)(void*)x) % 32) - const int use_sgemm = AVX_and_10_9 && (BADARRAY(A) || BADARRAY(X) || BADARRAY(Y)); - if (!use_sgemm) { - accelerate_sgemv(trans,m,n,alpha,A,ldA,X,incX,beta,Y,incY); - return; - } - - /* Arrays are misaligned, the CPU supports AVX, and we are running - * Mavericks. - * - * Emulation of SGEMV with SGEMM: - * - * SGEMV allows vectors to be strided. SGEMM requires all arrays to be - * contiguous along the leading dimension. To emulate striding in SGEMV - * with the leading dimension arguments in SGEMM we compute - * - * Y = alpha * op(A) @ X + beta * Y - * - * as - * - * Y.T = alpha * X.T @ op(A).T + beta * Y.T - * - * Because Fortran uses column major order and X.T and Y.T are row vectors, - * the leading dimensions of X.T and Y.T in SGEMM become equal to the - * strides of the column vectors X and Y in SGEMV. 
*/ - - switch (*trans) { - case 'T': - case 't': - case 'C': - case 'c': - accelerate_cblas_sgemm( CblasColMajor, CblasNoTrans, CblasNoTrans, - 1, *n, *m, *alpha, X, *incX, A, *ldA, *beta, Y, *incY ); - break; - case 'N': - case 'n': - accelerate_cblas_sgemm( CblasColMajor, CblasNoTrans, CblasTrans, - 1, *m, *n, *alpha, X, *incX, A, *ldA, *beta, Y, *incY ); - break; - default: - cblas_xerbla(1, "SGEMV", "Illegal transpose setting: %c\n", *trans); - } -} - -/* ----------------------------------------------------------------- */ -/* Override for an alias symbol for sgemv_ in Accelerate */ - -void sgemv (char *trans, - const int *m, const int *n, - const float *alpha, - const float *A, const int *lda, - const float *B, const int *incB, - const float *beta, - float *C, const int *incC) -{ - sgemv_(trans,m,n,alpha,A,lda,B,incB,beta,C,incC); -} - -/* ----------------------------------------------------------------- */ -/* cblas_sgemv override, based on Netlib CBLAS code */ - -void cblas_sgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *X, const int incX, const float beta, - float *Y, const int incY) -{ - char TA; - if (order == CblasColMajor) - { - if (TransA == CblasNoTrans) TA = 'N'; - else if (TransA == CblasTrans) TA = 'T'; - else if (TransA == CblasConjTrans) TA = 'C'; - else - { - cblas_xerbla(2, "cblas_sgemv","Illegal TransA setting, %d\n", TransA); - } - sgemv_(&TA, &M, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY); - } - else if (order == CblasRowMajor) - { - if (TransA == CblasNoTrans) TA = 'T'; - else if (TransA == CblasTrans) TA = 'N'; - else if (TransA == CblasConjTrans) TA = 'N'; - else - { - cblas_xerbla(2, "cblas_sgemv", "Illegal TransA setting, %d\n", TransA); - return; - } - sgemv_(&TA, &N, &M, &alpha, A, &lda, X, &incX, &beta, Y, &incY); - } - else - cblas_xerbla(1, "cblas_sgemv", "Illegal Order setting, %d\n", order); -} diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index f43b77c44..20ad39b05 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -5901,29 +5901,21 @@ add_newdoc('numpy.core.numerictypes', 'generic', # Attributes -add_newdoc('numpy.core.numerictypes', 'generic', ('T', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) +def refer_to_array_attribute(attr, method=True): + docstring = """ + Scalar {} identical to the corresponding array attribute. -add_newdoc('numpy.core.numerictypes', 'generic', ('base', + Please see `ndarray.{}`. """ - Not implemented (virtual attribute) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - a uniform API. + return attr, docstring.format("method" if method else "attribute", attr) - See also the corresponding attribute of the derived class of interest. 
- """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('T', method=False)) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('base', method=False)) add_newdoc('numpy.core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) @@ -5963,305 +5955,80 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('strides', # Methods -add_newdoc('numpy.core.numerictypes', 'generic', ('all', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('any', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('astype', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('choose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('clip', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('compress', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('copy', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dump', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('fill', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('all')) -add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('any')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argmax')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argmin')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argsort')) -add_newdoc('numpy.core.numerictypes', 'generic', ('item', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('astype')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('byteswap')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('choose')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('clip')) -add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('compress')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('conjugate')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('copy')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('cumprod')) -add_newdoc('numpy.core.numerictypes', 'generic', ('max', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('cumsum')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('diagonal')) - See also the corresponding attribute of the derived class of interest. 
+add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('dump')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('dumps')) -add_newdoc('numpy.core.numerictypes', 'generic', ('mean', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('fill')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('flatten')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('getfield')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('item')) -add_newdoc('numpy.core.numerictypes', 'generic', ('min', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('itemset')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('max')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('mean')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('min')) add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', """ @@ -6296,305 +6063,80 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', """)) -add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('prod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('put', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. 
- - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('resize', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('round', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('std', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('take', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('nonzero')) -add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('prod')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('ptp')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('put')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('ravel')) -add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('repeat')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('reshape')) - See also the corresponding attribute of the derived class of interest. 
+add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('resize')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('round')) -add_newdoc('numpy.core.numerictypes', 'generic', ('trace', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('searchsorted')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('setfield')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('setflags')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('sort')) -add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('squeeze')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('std')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('sum')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('swapaxes')) -add_newdoc('numpy.core.numerictypes', 'generic', ('var', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('take')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tofile')) - See also the corresponding attribute of the derived class of interest. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tolist')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tostring')) -add_newdoc('numpy.core.numerictypes', 'generic', ('view', - """ - Not implemented (virtual attribute) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('trace')) - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('transpose')) - See also the corresponding attribute of the derived class of interest. 
+add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('var')) - """)) +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('view')) ############################################################################## diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py index 708241289..6d7cbb244 100644 --- a/numpy/core/_dtype_ctypes.py +++ b/numpy/core/_dtype_ctypes.py @@ -22,9 +22,10 @@ Unfortunately, this fails because: * PEP3118 cannot represent unions, but both numpy and ctypes can * ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) """ -import _ctypes -import ctypes +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/core/__init__.py. import numpy as np @@ -39,6 +40,7 @@ def _from_ctypes_structure(t): "ctypes bitfields have no dtype equivalent") if hasattr(t, "_pack_"): + import ctypes formats = [] offsets = [] names = [] @@ -79,6 +81,7 @@ def _from_ctypes_scalar(t): def _from_ctypes_union(t): + import ctypes formats = [] offsets = [] names = [] @@ -98,6 +101,7 @@ def dtype_from_ctypes_type(t): """ Construct a dtype object from a ctypes type """ + import _ctypes if issubclass(t, _ctypes.Array): return _from_ctypes_array(t) elif issubclass(t, _ctypes._Pointer): diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 456ef76f0..5d9642ea8 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -695,7 +695,7 @@ def array2string(a, max_line_width=None, precision=None, def _extendLine(s, line, word, line_width, next_line_prefix, legacy): needs_wrap = len(line) + len(word) > line_width if legacy != '1.13': - s# don't wrap lines if it won't help + # don't wrap lines if it won't help if len(line) <= len(next_line_prefix): needs_wrap = False @@ -706,6 +706,33 @@ def _extendLine(s, line, word, line_width, next_line_prefix, legacy): return s, line +def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): + """ + Extends line with nicely formatted (possibly multi-line) string ``word``. 
+ """ + words = word.splitlines() + if len(words) == 1 or legacy == '1.13': + return _extendLine(s, line, word, line_width, next_line_prefix, legacy) + + max_word_length = max(len(word) for word in words) + if (len(line) + max_word_length > line_width and + len(line) > len(next_line_prefix)): + s += line.rstrip() + '\n' + line = next_line_prefix + words[0] + indent = next_line_prefix + else: + indent = len(line)*' ' + line += words[0] + + for word in words[1::]: + s += line.rstrip() + '\n' + line = indent + word + + suffix_length = max_word_length - len(words[-1]) + line += suffix_length*' ' + + return s, line + def _formatArray(a, format_function, line_width, next_line_prefix, separator, edge_items, summary_insert, legacy): """formatArray is designed for two modes of operation: @@ -758,7 +785,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, line = hanging_indent for i in range(leading_items): word = recurser(index + (i,), next_hanging_indent, next_width) - s, line = _extendLine( + s, line = _extendLine_pretty( s, line, word, elem_width, hanging_indent, legacy) line += separator @@ -772,7 +799,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, for i in range(trailing_items, 1, -1): word = recurser(index + (-i,), next_hanging_indent, next_width) - s, line = _extendLine( + s, line = _extendLine_pretty( s, line, word, elem_width, hanging_indent, legacy) line += separator @@ -780,7 +807,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, # width of the separator is not considered on 1.13 elem_width = curr_width word = recurser(index + (-1,), next_hanging_indent, next_width) - s, line = _extendLine( + s, line = _extendLine_pretty( s, line, word, elem_width, hanging_indent, legacy) s += line diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index 528113a9e..1868610f4 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -52,3 +52,6 @@ # Version 13 (NumPy 1.19) No change. # Version 13 (NumPy 1.20) No change. 
0x0000000d = 5b0e8bbded00b166125974fc71e80a33 + +# Version 14 (NumPy 1.19) DType related API additions +0x0000000e = 17a0f366e55ec05e5c5c149123478452 diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 88dc2d90a..d88772bdc 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -37,6 +37,7 @@ API_FILES = [join('multiarray', 'alloc.c'), join('multiarray', 'datetime_busdaycal.c'), join('multiarray', 'datetime_strings.c'), join('multiarray', 'descriptor.c'), + join('multiarray', 'dtypemeta.c'), join('multiarray', 'einsum.c.src'), join('multiarray', 'flagsobject.c'), join('multiarray', 'getset.c'), @@ -309,11 +310,13 @@ def write_file(filename, data): # Those *Api classes instances know how to output strings for the generated code class TypeApi: - def __init__(self, name, index, ptr_cast, api_name): + def __init__(self, name, index, ptr_cast, api_name, internal_type=None): self.index = index self.name = name self.ptr_cast = ptr_cast self.api_name = api_name + # The type used internally; if None, same as exported (ptr_cast) + self.internal_type = internal_type def define_from_array_api_string(self): return "#define %s (*(%s *)%s[%d])" % (self.name, @@ -325,9 +328,19 @@ class TypeApi: return " (void *) &%s" % self.name def internal_define(self): - astr = """\ -extern NPY_NO_EXPORT PyTypeObject %(type)s; -""" % {'type': self.name} + if self.internal_type is None: + return f"extern NPY_NO_EXPORT {self.ptr_cast} {self.name};\n" + + # If we are here, we need to define a larger struct internally, to which + # the type can safely be cast. But we normally want to use the original + # type, so name-mangle: + mangled_name = f"{self.name}Full" + astr = ( + # Create the mangled name: + f"extern NPY_NO_EXPORT {self.internal_type} {mangled_name};\n" + # And define the name as: (*(type *)(&mangled_name)) + f"#define {self.name} (*({self.ptr_cast} *)(&{mangled_name}))\n" + ) return astr class GlobalVarApi: diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index fe21bc543..7997135bb 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -201,7 +201,9 @@ def do_generate_api(targets, sources): for name, val in types_api.items(): index = val[0] - multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) + internal_type = None if len(val) == 1 else val[1] + multiarray_api_dict[name] = TypeApi( + name, index, 'PyTypeObject', api_name, internal_type) if len(multiarray_api_dict) != len(multiarray_api_index): keys_dict = set(multiarray_api_dict.keys()) diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py index 916fb537e..fbd323368 100644 --- a/numpy/core/code_generators/numpy_api.py +++ b/numpy/core/code_generators/numpy_api.py @@ -30,7 +30,9 @@ multiarray_scalar_bool_values = { multiarray_types_api = { 'PyBigArray_Type': (1,), 'PyArray_Type': (2,), - 'PyArrayDescr_Type': (3,), + # Internally, PyArrayDescr_Type is a PyArray_DTypeMeta; + # the following also defines PyArrayDescr_TypeFull (Full appended) + 'PyArrayDescr_Type': (3, "PyArray_DTypeMeta"), 'PyArrayFlags_Type': (4,), 'PyArrayIter_Type': (5,), 'PyArrayMultiIter_Type': (6,), diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 7193af839..0c63bcf73 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1374,10 +1374,17 @@ def resize(a, new_shape): See Also
-------- + np.reshape : Reshape an array without changing the total size. + np.pad : Enlarge and pad an array. + np.repeat : Repeat elements of an array. ndarray.resize : resize an array in-place. Notes ----- + When the total size of the array does not change, `~numpy.reshape` should + be used. In most other cases either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + Warning: This functionality does **not** consider axes separately, i.e. it does not apply interpolation/extrapolation. It fills the return array with the required number of elements, taken @@ -1401,22 +1408,21 @@ def resize(a, new_shape): """ if isinstance(new_shape, (int, nt.integer)): new_shape = (new_shape,) + a = ravel(a) - Na = len(a) - total_size = um.multiply.reduce(new_shape) - if Na == 0 or total_size == 0: - return mu.zeros(new_shape, a.dtype) - n_copies = int(total_size / Na) - extra = total_size % Na + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError('all elements of `new_shape` must be non-negative') - if extra != 0: - n_copies = n_copies + 1 - extra = Na - extra + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return np.zeros_like(a, shape=new_shape) - a = concatenate((a,) * n_copies) - if extra > 0: - a = a[:-extra] + repeats = -(-new_size // a.size) # ceil division + a = concatenate((a,) * repeats)[:new_size] return reshape(a, new_shape) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 5b7e8952e..5dd62e64a 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1809,6 +1809,77 @@ typedef struct { typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, void *user_data); + +/* + * PyArray_DTypeMeta related definitions. + * + * As of now, this API is preliminary and will be extended as necessary. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * The structures defined in this block are considered private API and + * may change without warning! + */ + /* TODO: Make this definition public in the API, as soon as it's settled */ + NPY_NO_EXPORT PyTypeObject PyArrayDTypeMeta_Type; + + /* + * While NumPy DTypes would not need to be heap types, the plan is to + * make DTypes available in Python, at which point we will probably want + * them to be. + * Since we also wish to add fields to the DType class, this looks like + * a typical instance definition, but with PyHeapTypeObject instead of + * only the PyObject_HEAD. + * This must only be exposed after very careful consideration, since + * it is a fairly complex construct which we may still want to + * refactor. + */ + typedef struct _PyArray_DTypeMeta { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance; for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* + * Is this DType created using the old API? This exists mainly to + * allow for assertions in paths specific to wrapping legacy types. + */ + npy_bool legacy; + /* The values stored by a parametric datatype depend on its instance */ + npy_bool parametric; + /* whether the DType can be instantiated (i.e. np.dtype cannot) */ + npy_bool abstract; + + /* + * The following fields replicate the most important dtype information.
+ * In the legacy implementation most of these are stored in the + * PyArray_Descr struct. + */ + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* kind for this type */ + char kind; + /* unique character representing this type */ + char type; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* + * Pointer to the original ArrFuncs. + * NOTE: We could make a copy to detect changes to `f`. + */ + PyArray_ArrFuncs *f; + } PyArray_DTypeMeta; + + #define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr)) + +#endif /* NPY_INTERNAL_BUILD */ + + /* * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files * npy_*_*_deprecated_api.h are only included from here and nowhere else. diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index 5ae6a4272..2cc2a8e71 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -1139,7 +1139,7 @@ def packbits(a, axis=None, bitorder='big'): ``None`` implies packing the flattened array. bitorder : {'big', 'little'}, optional The order of the input bits. 'big' will mimic bin(val), - ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011 => ``, 'little' will + ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. Defaults to 'big'. diff --git a/numpy/core/records.py b/numpy/core/records.py index af59de425..7e1c0d591 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -772,7 +772,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): - """Create a record array from binary data + r"""Create a record array from binary data Note that despite the name of this function it does not accept `str` instances. diff --git a/numpy/core/setup.py b/numpy/core/setup.py index fcc422545..16bac4272 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -10,9 +10,6 @@ from os.path import join from numpy.distutils import log from distutils.dep_util import newer from distutils.sysconfig import get_config_var -from numpy._build_utils.apple_accelerate import ( - uses_accelerate_framework, get_sgemv_fix - ) from numpy.compat import npy_load_module from setup_common import * # noqa: F403 @@ -392,7 +389,13 @@ def visibility_define(config): def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, dot_join - from numpy.distutils.system_info import get_info + from numpy.distutils.system_info import (get_info, blas_opt_info, + lapack_opt_info) + + # Accelerate is buggy, disallow it.
See also numpy/linalg/setup.py + for opt_order in (blas_opt_info.blas_order, lapack_opt_info.lapack_order): + if 'accelerate' in opt_order: + opt_order.remove('accelerate') config = Configuration('core', parent_package, top_path) local_dir = config.local_path @@ -762,8 +765,6 @@ def configuration(parent_package='',top_path=None): common_src.extend([join('src', 'common', 'cblasfuncs.c'), join('src', 'common', 'python_xerbla.c'), ]) - if uses_accelerate_framework(blas_info): - common_src.extend(get_sgemv_fix()) else: extra_info = {} @@ -783,6 +784,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'conversion_utils.h'), join('src', 'multiarray', 'ctors.h'), join('src', 'multiarray', 'descriptor.h'), + join('src', 'multiarray', 'dtypemeta.h'), join('src', 'multiarray', 'dragon4.h'), join('src', 'multiarray', 'getset.h'), join('src', 'multiarray', 'hashdescr.h'), @@ -841,6 +843,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'datetime_busday.c'), join('src', 'multiarray', 'datetime_busdaycal.c'), join('src', 'multiarray', 'descriptor.c'), + join('src', 'multiarray', 'dtypemeta.c'), join('src', 'multiarray', 'dragon4.c'), join('src', 'multiarray', 'dtype_transfer.c'), join('src', 'multiarray', 'einsum.c.src'), @@ -963,6 +966,7 @@ def configuration(parent_package='',top_path=None): config.add_subpackage('tests') config.add_data_dir('tests/data') + config.add_data_dir('tests/examples') config.make_svn_version_py() diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 63c4a76a9..72b59f9ae 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -40,7 +40,8 @@ C_ABI_VERSION = 0x01000009 # 0x0000000c - 1.14.x # 0x0000000c - 1.15.x # 0x0000000d - 1.16.x -C_API_VERSION = 0x0000000d +# 0x0000000e - 1.19.x +C_API_VERSION = 0x0000000e class MismatchCAPIWarning(Warning): pass diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src index 83666f63b..bd4743905 100644 --- a/numpy/core/src/common/npy_cpu_features.c.src +++ b/numpy/core/src/common/npy_cpu_features.c.src @@ -76,11 +76,13 @@ npy__cpu_getxcr0(void) #if defined(_MSC_VER) || defined (__INTEL_COMPILER) return _xgetbv(0); #elif defined(__GNUC__) || defined(__clang__) + /* named form of xgetbv not supported on OSX, so must use byte form, see: + * https://github.com/asmjit/asmjit/issues/78 + */ unsigned int eax, edx; - __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0)); - return (eax | (unsigned long long)edx << 32); + __asm(".byte 0x0F, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(0)); + return eax; #else - // TODO: handle other x86 compilers return 0; #endif } @@ -110,7 +112,6 @@ npy__cpu_cpuid(int reg[4], int func_id) ); #endif #else - // TODO: handle other x86 compilers reg[0] = 0; #endif } @@ -123,8 +124,15 @@ npy__cpu_init_features(void) // validate platform support int reg[] = {0, 0, 0, 0}; npy__cpu_cpuid(reg, 0); - if (reg[0] == 0) - return; + if (reg[0] == 0) { + npy__cpu_have[NPY_CPU_FEATURE_MMX] = 1; + npy__cpu_have[NPY_CPU_FEATURE_SSE] = 1; + npy__cpu_have[NPY_CPU_FEATURE_SSE2] = 1; + #ifdef NPY_CPU_AMD64 + npy__cpu_have[NPY_CPU_FEATURE_SSE3] = 1; + #endif + return; + } npy__cpu_cpuid(reg, 1); npy__cpu_have[NPY_CPU_FEATURE_MMX] = (reg[3] & (1 << 23)) != 0; diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 38d5f21eb..552c56349 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ 
-20,6 +20,7 @@ #include "npy_sort.h" #include "common.h" #include "ctors.h" +#include "dtypemeta.h" #include "lowlevel_strided_loops.h" #include "usertypes.h" #include "_datetime.h" @@ -4367,6 +4368,17 @@ set_typeinfo(PyObject *dict) PyObject *cobj, *key; /* + * Override the base class for all types, eventually all of this logic + * should be defined on the class and inherited to the scalar. + * (NPY_HALF is the largest builtin one.) + */ + for (i = 0; i <= NPY_HALF; i++) { + if (dtypemeta_wrap_legacy_descriptor(_builtin_descrs[i]) < 0) { + return -1; + } + } + + /* * Add cast functions for the new types */ diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 308e72009..7a232b5d9 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1425,47 +1425,13 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) #else char *docstr; #endif - static char *msg = "already has a docstring"; - PyObject *tp_dict = PyArrayDescr_Type.tp_dict; - PyObject *myobj; - static PyTypeObject *PyMemberDescr_TypePtr = NULL; - static PyTypeObject *PyGetSetDescr_TypePtr = NULL; - static PyTypeObject *PyMethodDescr_TypePtr = NULL; + static char *msg = "already has a different docstring"; /* Don't add docstrings */ if (Py_OptimizeFlag > 1) { Py_RETURN_NONE; } - if (PyGetSetDescr_TypePtr == NULL) { - /* Get "subdescr" */ - myobj = _PyDict_GetItemStringWithError(tp_dict, "fields"); - if (myobj == NULL && PyErr_Occurred()) { - return NULL; - } - if (myobj != NULL) { - PyGetSetDescr_TypePtr = Py_TYPE(myobj); - } - } - if (PyMemberDescr_TypePtr == NULL) { - myobj = _PyDict_GetItemStringWithError(tp_dict, "alignment"); - if (myobj == NULL && PyErr_Occurred()) { - return NULL; - } - if (myobj != NULL) { - PyMemberDescr_TypePtr = Py_TYPE(myobj); - } - } - if (PyMethodDescr_TypePtr == NULL) { - myobj = _PyDict_GetItemStringWithError(tp_dict, "newbyteorder"); - if (myobj == NULL && PyErr_Occurred()) { - return NULL; - } - if (myobj != NULL) { - PyMethodDescr_TypePtr = Py_TYPE(myobj); - } - } - if (!PyArg_ParseTuple(args, "OO!:add_docstring", &obj, &PyUnicode_Type, &str)) { return NULL; } @@ -1475,39 +1441,47 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) return NULL; } -#define _TESTDOC1(typebase) (Py_TYPE(obj) == &Py##typebase##_Type) -#define _TESTDOC2(typebase) (Py_TYPE(obj) == Py##typebase##_TypePtr) -#define _ADDDOC(typebase, doc, name) do { \ - Py##typebase##Object *new = (Py##typebase##Object *)obj; \ +#define _ADDDOC(doc, name) \ if (!(doc)) { \ doc = docstr; \ + Py_INCREF(str); /* hold on to string (leaks reference) */ \ } \ - else { \ + else if (strcmp(doc, docstr) != 0) { \ PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ return NULL; \ - } \ - } while (0) + } - if (_TESTDOC1(CFunction)) { - _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); + if (Py_TYPE(obj) == &PyCFunction_Type) { + PyCFunctionObject *new = (PyCFunctionObject *)obj; + _ADDDOC(new->m_ml->ml_doc, new->m_ml->ml_name); } - else if (_TESTDOC1(Type)) { - _ADDDOC(Type, new->tp_doc, new->tp_name); + else if (Py_TYPE(obj) == &PyType_Type) { + PyTypeObject *new = (PyTypeObject *)obj; + _ADDDOC(new->tp_doc, new->tp_name); } - else if (_TESTDOC2(MemberDescr)) { - _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); + else if (Py_TYPE(obj) == &PyMemberDescr_Type) { + PyMemberDescrObject *new = (PyMemberDescrObject *)obj; + _ADDDOC(new->d_member->doc, new->d_member->name); } - else if 
(_TESTDOC2(GetSetDescr)) { - _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + else if (Py_TYPE(obj) == &PyGetSetDescr_Type) { + PyGetSetDescrObject *new = (PyGetSetDescrObject *)obj; + _ADDDOC(new->d_getset->doc, new->d_getset->name); } - else if (_TESTDOC2(MethodDescr)) { - _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); + else if (Py_TYPE(obj) == &PyMethodDescr_Type) { + PyMethodDescrObject *new = (PyMethodDescrObject *)obj; + _ADDDOC(new->d_method->ml_doc, new->d_method->ml_name); } else { PyObject *doc_attr; doc_attr = PyObject_GetAttrString(obj, "__doc__"); - if (doc_attr != NULL && doc_attr != Py_None) { + if (doc_attr != NULL && doc_attr != Py_None && + (PyUnicode_Compare(doc_attr, str) != 0)) { + Py_DECREF(doc_attr); + if (PyErr_Occurred()) { + /* error during PyUnicode_Compare */ + return NULL; + } PyErr_Format(PyExc_RuntimeError, "object %s", msg); return NULL; } @@ -1521,11 +1495,8 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) Py_RETURN_NONE; } -#undef _TESTDOC1 -#undef _TESTDOC2 #undef _ADDDOC - Py_INCREF(str); Py_RETURN_NONE; } diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 9283eefce..14e64b647 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -483,6 +483,8 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s, /* INCREF on entry DECREF on exit */ Py_INCREF(s); + PyObject *seq = NULL; + if (PyArray_Check(s)) { if (!(PyArray_CheckExact(s))) { /* @@ -529,10 +531,11 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s, return 0; } - slen = PySequence_Length(s); - if (slen < 0) { + seq = PySequence_Fast(s, "Could not convert object to sequence"); + if (seq == NULL) { goto fail; } + slen = PySequence_Fast_GET_SIZE(seq); /* * Either the dimensions match, or the sequence has length 1 and can @@ -547,14 +550,9 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s, /* Broadcast the one element from the sequence to all the outputs */ if (slen == 1) { - PyObject *o; + PyObject *o = PySequence_Fast_GET_ITEM(seq, 0); npy_intp alen = PyArray_DIM(a, dim); - o = PySequence_GetItem(s, 0); - if (o == NULL) { - goto fail; - } - for (i = 0; i < alen; i++) { if ((PyArray_NDIM(a) - dim) > 1) { PyArrayObject * tmp = @@ -571,26 +569,18 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s, res = PyArray_SETITEM(dst, b, o); } if (res < 0) { - Py_DECREF(o); goto fail; } } - Py_DECREF(o); } /* Copy element by element */ else { - PyObject * seq; - seq = PySequence_Fast(s, "Could not convert object to sequence"); - if (seq == NULL) { - goto fail; - } for (i = 0; i < slen; i++) { PyObject * o = PySequence_Fast_GET_ITEM(seq, i); if ((PyArray_NDIM(a) - dim) > 1) { PyArrayObject * tmp = (PyArrayObject *)array_item_asarray(dst, i); if (tmp == NULL) { - Py_DECREF(seq); goto fail; } @@ -602,17 +592,17 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s, res = PyArray_SETITEM(dst, b, o); } if (res < 0) { - Py_DECREF(seq); goto fail; } } - Py_DECREF(seq); } + Py_DECREF(seq); Py_DECREF(s); return 0; fail: + Py_XDECREF(seq); Py_DECREF(s); return res; } @@ -879,7 +869,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, return 0; } -static PyObject * +static void raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) { static PyObject *exc_type = NULL; @@ -904,12 +894,12 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) } PyErr_SetObject(exc_type, exc_value); Py_DECREF(exc_value); - return NULL; + return; fail: 
/* we couldn't raise the formatted exception for some reason */ PyErr_WriteUnraisable(NULL); - return PyErr_NoMemory(); + PyErr_NoMemory(); } /* @@ -1089,10 +1079,10 @@ PyArray_NewFromDescr_int( data = npy_alloc_cache(nbytes); } if (data == NULL) { - return raise_memory_error(fa->nd, fa->dimensions, descr); + raise_memory_error(fa->nd, fa->dimensions, descr); + goto fail; } fa->flags |= NPY_ARRAY_OWNDATA; - } else { /* @@ -2039,12 +2029,14 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, PyErr_SetString(PyExc_ValueError, "object of too small depth for desired array"); Py_DECREF(arr); + Py_XDECREF(newtype); ret = NULL; } else if (max_depth != 0 && PyArray_NDIM(arr) > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); Py_DECREF(arr); + Py_XDECREF(newtype); ret = NULL; } else { diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index b26a26abf..b4107f8f3 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -1744,7 +1744,7 @@ fail: NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base) { - PyArray_Descr *newdescr = PyObject_New(PyArray_Descr, &PyArrayDescr_Type); + PyArray_Descr *newdescr = PyObject_New(PyArray_Descr, Py_TYPE(base)); if (newdescr == NULL) { return NULL; @@ -2261,9 +2261,16 @@ static PyGetSetDef arraydescr_getsets[] = { }; static PyObject * -arraydescr_new(PyTypeObject *NPY_UNUSED(subtype), +arraydescr_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) { + if (subtype != &PyArrayDescr_Type) { + /* The DTypeMeta class should prevent this from happening. */ + PyErr_Format(PyExc_SystemError, + "'%S' must not inherit np.dtype.__new__().", subtype); + return NULL; + } + PyObject *odescr, *metadata=NULL; PyArray_Descr *descr, *conv; npy_bool align = NPY_FALSE; @@ -2334,6 +2341,7 @@ arraydescr_new(PyTypeObject *NPY_UNUSED(subtype), return (PyObject *)conv; } + /* * Return a tuple of * (cleaned metadata dictionary, tuple with (str, num)) @@ -3456,21 +3464,34 @@ static PyMappingMethods descr_as_mapping = { /****************** End of Mapping Protocol ******************************/ -NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - .tp_name = "numpy.dtype", - .tp_basicsize = sizeof(PyArray_Descr), - /* methods */ - .tp_dealloc = (destructor)arraydescr_dealloc, - .tp_repr = (reprfunc)arraydescr_repr, - .tp_as_number = &descr_as_number, - .tp_as_sequence = &descr_as_sequence, - .tp_as_mapping = &descr_as_mapping, - .tp_str = (reprfunc)arraydescr_str, - .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_richcompare = (richcmpfunc)arraydescr_richcompare, - .tp_methods = arraydescr_methods, - .tp_members = arraydescr_members, - .tp_getset = arraydescr_getsets, - .tp_new = arraydescr_new, + +/* + * NOTE: Since this is a MetaClass, the name has Full appended here, the + * correct name of the type is PyArrayDescr_Type. 
+ */ +NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull = { + {{ + /* NULL represents `type`, this is set to DTypeMeta at import time */ + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "numpy.dtype", + .tp_basicsize = sizeof(PyArray_Descr), + .tp_dealloc = (destructor)arraydescr_dealloc, + .tp_repr = (reprfunc)arraydescr_repr, + .tp_as_number = &descr_as_number, + .tp_as_sequence = &descr_as_sequence, + .tp_as_mapping = &descr_as_mapping, + .tp_str = (reprfunc)arraydescr_str, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_richcompare = (richcmpfunc)arraydescr_richcompare, + .tp_methods = arraydescr_methods, + .tp_members = arraydescr_members, + .tp_getset = arraydescr_getsets, + .tp_new = arraydescr_new, + },}, + .type_num = -1, + .kind = '\0', + .abstract = 1, + .parametric = 0, + .singleton = 0, + .scalar_type = NULL, }; diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c new file mode 100644 index 000000000..76f7b599a --- /dev/null +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -0,0 +1,269 @@ +/* Array Descr Object */ + +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include "structmember.h" +#include "assert.h" + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#include <numpy/ndarraytypes.h> +#include "npy_pycompat.h" + +#include "dtypemeta.h" + + +static void +dtypemeta_dealloc(PyArray_DTypeMeta *self) { + /* Do not accidentally delete a statically defined DType: */ + assert(((PyTypeObject *)self)->tp_flags & Py_TPFLAGS_HEAPTYPE); + + Py_XDECREF(self->scalar_type); + Py_XDECREF(self->singleton); + PyType_Type.tp_dealloc((PyObject *) self); +} + +static PyObject * +dtypemeta_new(PyTypeObject *NPY_UNUSED(type), + PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +{ + PyErr_SetString(PyExc_TypeError, + "Preliminary-API: Cannot subclass DType."); + return NULL; +} + +static int +dtypemeta_init(PyTypeObject *NPY_UNUSED(type), + PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +{ + PyErr_SetString(PyExc_TypeError, + "Preliminary-API: Cannot __init__ DType class."); + return -1; +} + +/** + * tp_is_gc slot of Python types. This is implemented only for documentation + * purposes to indicate and document the subtleties involved. + * + * Python Type objects are either statically created (typical C-Extension type) + * or HeapTypes (typically created in Python). + * HeapTypes have the Py_TPFLAGS_HEAPTYPE flag and are garbage collected. + * Our DTypeMeta instances (`np.dtype` and its subclasses) *may* be HeapTypes + * if the Py_TPFLAGS_HEAPTYPE flag is set (they are created from Python). + * They are not for legacy DTypes or np.dtype itself. + * + * @param self + * @return nonzero if the object is garbage collected + */ +static NPY_INLINE int +dtypemeta_is_gc(PyObject *dtype_class) +{ + return PyType_Type.tp_is_gc(dtype_class); +} + + +static int +dtypemeta_traverse(PyArray_DTypeMeta *type, visitproc visit, void *arg) +{ + /* + * We have to traverse the base class (if it is a HeapType). + * PyType_Type will handle this logic for us. + * This function is currently not used, but will probably be necessary + * in the future when we implement HeapTypes (python/dynamically + * defined types). It should be revised at that time. 
+ */ + assert(0); + assert(!type->legacy && (PyTypeObject *)type != &PyArrayDescr_Type); + Py_VISIT(type->singleton); + Py_VISIT(type->scalar_type); + return PyType_Type.tp_traverse((PyObject *)type, visit, arg); +} + + +static PyObject * +legacy_dtype_default_new(PyArray_DTypeMeta *self, + PyObject *args, PyObject *kwargs) +{ + /* TODO: This should allow endianness and possibly metadata */ + if (self->parametric) { + /* reject parametric ones since we would need to get unit, etc. info */ + PyErr_Format(PyExc_TypeError, + "Preliminary-API: Flexible/Parametric legacy DType '%S' can " + "only be instantiated using `np.dtype(...)`", self); + return NULL; + } + + if (PyTuple_GET_SIZE(args) != 0 || + (kwargs != NULL && PyDict_Size(kwargs))) { + PyErr_Format(PyExc_TypeError, + "currently only the no-argument instantiation is supported; " + "use `np.dtype` instead."); + return NULL; + } + Py_INCREF(self->singleton); + return (PyObject *)self->singleton; +} + +/** + * This function takes a PyArray_Descr and replaces its base class with + * a newly created dtype subclass (DTypeMeta instances). + * There are some subtleties to keep in mind when doing this; + * first, the class object itself can either be a HeapType or not. + * Since we are defining the DType from C, we will not make it a HeapType, + * thus making it identical to a typical *static* type (except that we + * malloc it). We could do it the other way, but there seems no reason to + * do so. + * + * The DType instances (the actual dtypes or descriptors) are based on + * prototypes which are passed in. These should not be garbage collected + * and thus Py_TPFLAGS_HAVE_GC is not set. (We could allow this, but then we + * would have to allocate a new object, since the GC needs information before + * the actual struct). + * + * The above is the reason why this should work exactly like it would for a + * static type here. + * Otherwise, we blur the lines between C-defined extension classes + * and Python subclasses. e.g. `class MyInt(int): pass` is very different + * from our `class Float64(np.dtype): pass`, because the latter should not + * be a HeapType and its instances should be exact PyArray_Descr structs. + * + * @param descr The descriptor that should be wrapped. + * + * @returns 0 on success, -1 on failure. + */ +NPY_NO_EXPORT int +dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr) +{ + if (Py_TYPE(descr) != &PyArrayDescr_Type) { + PyErr_Format(PyExc_RuntimeError, + "During creation/wrapping of legacy DType, the original class " + "was not PyArrayDescr_Type (it is replaced in this step)."); + return -1; + } + + /* + * Note: we have no intention of freeing the memory again since this + * behaves identically to static type definition (see comment above). + * This seems cleaner for the legacy API; in the new API both static + * and heap types are possible (some difficulty arises from the fact that + * these are instances of DTypeMeta and not type). + * In particular our own DTypes can be true static declarations. + * However, this function remains necessary for legacy user dtypes. + */ + + const char *scalar_name = descr->typeobj->tp_name; + /* + * We have to take only the name, and ignore the module to get + * a reasonable __name__, since static types are limited in this regard + * (this is not ideal, but not a big issue in practice). + * This is what Python does to print __name__ for static types.
+ */ + const char *dot = strrchr(scalar_name, '.'); + if (dot) { + scalar_name = dot + 1; + } + ssize_t name_length = strlen(scalar_name) + 14; + + char *tp_name = malloc(name_length); + if (tp_name == NULL) { + PyErr_NoMemory(); + return -1; + } + + snprintf(tp_name, name_length, "numpy.dtype[%s]", scalar_name); + + PyArray_DTypeMeta *dtype_class = malloc(sizeof(PyArray_DTypeMeta)); + if (dtype_class == NULL) { + PyDataMem_FREE(tp_name); + return -1; + } + /* + * Initialize the struct fields identically to static code by copying + * a prototype instance for everything except our own fields, which + * vary between the DTypes. + * In particular any Object initialization must be strictly copied from + * the untouched prototype to avoid complexities (e.g. with PyPy). + * Any Type slots need to be fixed before PyType_Ready, although most + * will be inherited automatically there. + */ + static PyArray_DTypeMeta prototype = { + {{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = NULL, /* set below */ + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_base = &PyArrayDescr_Type, + .tp_new = (newfunc)legacy_dtype_default_new, + },}, + .legacy = 1, + .abstract = 0, /* this is a concrete DType */ + /* Further fields are not common between DTypes */ + }; + memcpy(dtype_class, &prototype, sizeof(PyArray_DTypeMeta)); + /* Fix the name of the type */ + ((PyTypeObject *)dtype_class)->tp_name = tp_name; + + /* Let Python finish the initialization (probably unnecessary) */ + if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { + return -1; + } + + /* + * Fill DTypeMeta information that varies between DTypes; any variable + * type information would need to be set before PyType_Ready(). + */ + dtype_class->singleton = descr; + Py_INCREF(descr->typeobj); + dtype_class->scalar_type = descr->typeobj; + dtype_class->type_num = descr->type_num; + dtype_class->type = descr->type; + dtype_class->f = descr->f; + dtype_class->kind = descr->kind; + + if (PyTypeNum_ISDATETIME(descr->type_num)) { + /* Datetimes are flexible, but were not considered previously */ + dtype_class->parametric = NPY_TRUE; + } + else if (PyTypeNum_ISFLEXIBLE(descr->type_num)) { + dtype_class->parametric = NPY_TRUE; + } + + /* Finally, replace the current class of the descr */ + Py_TYPE(descr) = (PyTypeObject *)dtype_class; + + return 0; +} + + +/* + * Simple exposed information, defined for each DType (class). This is + * preliminary (the flags should also return bools).
+ */ +static PyMemberDef dtypemeta_members[] = { + {"_abstract", + T_BYTE, offsetof(PyArray_DTypeMeta, abstract), READONLY, NULL}, + {"type", + T_OBJECT, offsetof(PyArray_DTypeMeta, scalar_type), READONLY, NULL}, + {"_parametric", + T_BYTE, offsetof(PyArray_DTypeMeta, parametric), READONLY, NULL}, + {NULL, 0, 0, 0, NULL}, +}; + + +NPY_NO_EXPORT PyTypeObject PyArrayDTypeMeta_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "numpy._DTypeMeta", + .tp_basicsize = sizeof(PyArray_DTypeMeta), + .tp_dealloc = (destructor)dtypemeta_dealloc, + /* Types are garbage collected (see dtypemeta_is_gc documentation) */ + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, + .tp_doc = "Preliminary NumPy API: The Type of NumPy DTypes (metaclass)", + .tp_members = dtypemeta_members, + .tp_base = NULL, /* set to PyType_Type at import time */ + .tp_init = (initproc)dtypemeta_init, + .tp_new = dtypemeta_new, + .tp_is_gc = dtypemeta_is_gc, + .tp_traverse = (traverseproc)dtypemeta_traverse, +}; + diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h new file mode 100644 index 000000000..97152d1ad --- /dev/null +++ b/numpy/core/src/multiarray/dtypemeta.h @@ -0,0 +1,7 @@ +#ifndef _NPY_DTYPEMETA_H +#define _NPY_DTYPEMETA_H + +NPY_NO_EXPORT int +dtypemeta_wrap_legacy_descriptor(PyArray_Descr *dtypem); + +#endif /*_NPY_DTYPEMETA_H */ diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 6915371d8..ab5076711 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1872,6 +1872,7 @@ array_empty_like(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) /* steals the reference to dtype if it's not NULL */ ret = (PyArrayObject *)PyArray_NewLikeArrayWithShape(prototype, order, dtype, shape.len, shape.ptr, subok); + npy_free_cache_dim_obj(shape); if (!ret) { goto fail; } @@ -4445,6 +4446,18 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (set_matmul_flags(d) < 0) { goto err; } + + PyArrayDTypeMeta_Type.tp_base = &PyType_Type; + if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { + goto err; + } + + PyArrayDescr_Type.tp_hash = PyArray_DescrHash; + Py_TYPE(&PyArrayDescr_Type) = &PyArrayDTypeMeta_Type; + if (PyType_Ready(&PyArrayDescr_Type) < 0) { + goto err; + } + initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { @@ -4478,10 +4491,6 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } - PyArrayDescr_Type.tp_hash = PyArray_DescrHash; - if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; - } if (PyType_Ready(&PyArrayFlags_Type) < 0) { goto err; } diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 2f1767391..f13f50759 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -1333,74 +1333,46 @@ gentype_transpose_get(PyObject *self) static PyGetSetDef gentype_getsets[] = { {"ndim", (getter)gentype_ndim_get, - (setter) 0, - "number of array dimensions", - NULL}, + (setter) 0, NULL, NULL}, {"flags", (getter)gentype_flags_get, - (setter)0, - "integer value of flags", - NULL}, + (setter)0, NULL, NULL}, {"shape", (getter)gentype_shape_get, - (setter)0, - "tuple of array dimensions", - NULL}, + (setter)0, NULL, NULL}, {"strides", (getter)gentype_shape_get, - (setter) 0, - "tuple of bytes steps in each dimension", - NULL}, + (setter) 0, NULL, NULL}, {"data", (getter)gentype_data_get, - (setter) 0, - "pointer to 
start of data", - NULL}, + (setter) 0, NULL, NULL}, {"itemsize", (getter)gentype_itemsize_get, - (setter)0, - "length of one element in bytes", - NULL}, + (setter)0, NULL, NULL}, {"size", (getter)gentype_size_get, - (setter)0, - "number of elements in the gentype", - NULL}, + (setter)0, NULL, NULL}, {"nbytes", (getter)gentype_itemsize_get, - (setter)0, - "length of item in bytes", - NULL}, + (setter)0, NULL, NULL}, {"base", (getter)gentype_base_get, - (setter)0, - "base object", - NULL}, + (setter)0, NULL, NULL}, {"dtype", (getter)gentype_typedescr_get, - NULL, - "get array data-descriptor", - NULL}, + NULL, NULL, NULL}, {"real", (getter)gentype_real_get, - (setter)0, - "real part of scalar", - NULL}, + (setter)0, NULL, NULL}, {"imag", (getter)gentype_imag_get, - (setter)0, - "imaginary part of scalar", - NULL}, + (setter)0, NULL, NULL}, {"flat", (getter)gentype_flat_get, - (setter)0, - "a 1-d view of scalar", - NULL}, + (setter)0, NULL, NULL}, {"T", (getter)gentype_transpose_get, - (setter)0, - "transpose", - NULL}, + (setter)0, NULL, NULL}, {"__array_interface__", (getter)gentype_interface_get, NULL, diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c index b570aec08..363cbdba2 100644 --- a/numpy/core/src/multiarray/strfuncs.c +++ b/numpy/core/src/multiarray/strfuncs.c @@ -168,15 +168,13 @@ array_repr_builtin(PyArrayObject *self, int repr) NPY_NO_EXPORT PyObject * array_repr(PyArrayObject *self) { - PyObject *s, *arglist; + PyObject *s; if (PyArray_ReprFunction == NULL) { s = array_repr_builtin(self, 1); } else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_ReprFunction, arglist); - Py_DECREF(arglist); + s = PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL); } return s; } @@ -185,15 +183,13 @@ array_repr(PyArrayObject *self) NPY_NO_EXPORT PyObject * array_str(PyArrayObject *self) { - PyObject *s, *arglist; + PyObject *s; if (PyArray_StrFunction == NULL) { s = array_repr_builtin(self, 0); } else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_StrFunction, arglist); - Py_DECREF(arglist); + s = PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL); } return s; } diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index 997467b4d..bc320138d 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -37,6 +37,7 @@ maintainer email: oliphant.travis@ieee.org #include "npy_pycompat.h" #include "usertypes.h" +#include "dtypemeta.h" NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL; @@ -226,6 +227,11 @@ PyArray_RegisterDataType(PyArray_Descr *descr) return -1; } userdescrs[NPY_NUMUSERTYPES++] = descr; + + if (dtypemeta_wrap_legacy_descriptor(descr) < 0) { + return -1; + } + return typenum; } diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index a5c663a47..a59a9acf5 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -225,10 +225,6 @@ NPY_NO_EXPORT void PyUFunc_O_O_method(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func) { char *meth = (char *)func; - PyObject *tup = PyTuple_New(0); - if (tup == NULL) { - return; - } UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject **out = (PyObject **)op1; @@ -247,20 +243,17 @@ PyUFunc_O_O_method(char **args, npy_intp const *dimensions, npy_intp const *step "type %s which has no callable %s method", i, type->tp_name, meth); npy_PyErr_ChainExceptionsCause(exc, val, tb); - 
Py_DECREF(tup); Py_XDECREF(func); return; } - ret = PyObject_Call(func, tup, NULL); + ret = PyObject_CallObject(func, NULL); Py_DECREF(func); if (ret == NULL) { - Py_DECREF(tup); return; } Py_XDECREF(*out); *out = ret; } - Py_DECREF(tup); } /*UFUNC_API*/ @@ -359,7 +352,7 @@ PyUFunc_On_Om(char **args, npy_intp const *dimensions, npy_intp const *steps, vo PyTuple_SET_ITEM(arglist, j, in); Py_INCREF(in); } - result = PyEval_CallObject(tocall, arglist); + result = PyObject_CallObject(tocall, arglist); Py_DECREF(arglist); if (result == NULL) { return; diff --git a/numpy/core/tests/examples/checks.pyx b/numpy/core/tests/examples/checks.pyx new file mode 100644 index 000000000..ecf0ad3fa --- /dev/null +++ b/numpy/core/tests/examples/checks.pyx @@ -0,0 +1,26 @@ +""" +Functions in this module give python-space wrappers for cython functions +exposed in numpy/__init__.pxd, so they can be tested in test_cython.py +""" +cimport numpy as cnp +cnp.import_array() + + +def is_td64(obj): + return cnp.is_timedelta64_object(obj) + + +def is_dt64(obj): + return cnp.is_datetime64_object(obj) + + +def get_dt64_value(obj): + return cnp.get_datetime64_value(obj) + + +def get_td64_value(obj): + return cnp.get_timedelta64_value(obj) + + +def get_dt64_unit(obj): + return cnp.get_datetime64_unit(obj) diff --git a/numpy/core/tests/examples/setup.py b/numpy/core/tests/examples/setup.py new file mode 100644 index 000000000..9860bf5f7 --- /dev/null +++ b/numpy/core/tests/examples/setup.py @@ -0,0 +1,26 @@ +""" +Provide python-space access to the functions exposed in numpy/__init__.pxd +for testing. +""" + +import numpy as np +from distutils.core import setup +from Cython.Build import cythonize +from setuptools.extension import Extension +import os + +here = os.path.dirname(__file__) +macros = [("NPY_NO_DEPRECATED_API", 0)] + +checks = Extension( + "checks", + sources=[os.path.join(here, "checks.pyx")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [checks] + +setup( + ext_modules=cythonize(extensions) +) diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index e29217461..a2703d81b 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -395,6 +395,81 @@ class TestArray2String: "[ 'xxxxx']" ) + def test_multiline_repr(self): + class MultiLine: + def __repr__(self): + return "Line 1\nLine 2" + + a = np.array([[None, MultiLine()], [MultiLine(), None]]) + + assert_equal( + np.array2string(a), + '[[None Line 1\n' + ' Line 2]\n' + ' [Line 1\n' + ' Line 2 None]]' + ) + assert_equal( + np.array2string(a, max_line_width=5), + '[[None\n' + ' Line 1\n' + ' Line 2]\n' + ' [Line 1\n' + ' Line 2\n' + ' None]]' + ) + assert_equal( + repr(a), + 'array([[None, Line 1\n' + ' Line 2],\n' + ' [Line 1\n' + ' Line 2, None]], dtype=object)' + ) + + class MultiLineLong: + def __repr__(self): + return "Line 1\nLooooooooooongestLine2\nLongerLine 3" + + a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]]) + assert_equal( + repr(a), + 'array([[None, Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ],\n' + ' [Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 , None]], dtype=object)' + ) + assert_equal( + np.array_repr(a, 20), + 'array([[None,\n' + ' Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ],\n' + ' [Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ,\n' + ' None]],\n' + ' dtype=object)' + ) + + def test_nested_array_repr(self): + a = np.empty((2, 2), dtype=object) + a[0, 0] = np.eye(2) + a[0, 1] 
= np.eye(3) + a[1, 0] = None + a[1, 1] = np.ones((3, 1)) + assert_equal( + repr(a), + 'array([[array([[1., 0.],\n' + ' [0., 1.]]), array([[1., 0., 0.],\n' + ' [0., 1., 0.],\n' + ' [0., 0., 1.]])],\n' + ' [None, array([[1.],\n' + ' [1.],\n' + ' [1.]])]], dtype=object)' + ) + @given(hynp.from_dtype(np.dtype("U"))) def test_any_text(self, text): # This test checks that, given any value that can be represented in an diff --git a/numpy/core/tests/test_cython.py b/numpy/core/tests/test_cython.py new file mode 100644 index 000000000..92ef09c9b --- /dev/null +++ b/numpy/core/tests/test_cython.py @@ -0,0 +1,128 @@ +import os +import shutil +import subprocess +import sys +import pytest + +import numpy as np + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from distutils.version import LooseVersion + + # Cython 0.29.14 is required for Python 3.8 and there are + # other fixes in the 0.29 series that are needed even for earlier + # Python versions. + # Note: keep in sync with the one in pyproject.toml + required_version = LooseVersion("0.29.14") + if LooseVersion(cython_version) < required_version: + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +@pytest.fixture +def install_temp(request, tmp_path): + # Based in part on test_cython from random.tests.test_extending + + here = os.path.dirname(__file__) + ext_dir = os.path.join(here, "examples") + + tmp_path = tmp_path._str + cytest = os.path.join(tmp_path, "cytest") + + shutil.copytree(ext_dir, cytest) + # build the examples and "install" them into a temporary directory + + install_log = os.path.join(tmp_path, "tmp_install_log.txt") + subprocess.check_call( + [ + sys.executable, + "setup.py", + "build", + "install", + "--prefix", + os.path.join(tmp_path, "installdir"), + "--single-version-externally-managed", + "--record", + install_log, + ], + cwd=cytest, + ) + + # In order to import the built module, we need to add its path to + # sys.path, so parse that out of the record + with open(install_log) as fid: + for line in fid: + if "checks" in line: + sys.path.append(os.path.dirname(line)) + break + else: + raise RuntimeError(f'could not parse "{install_log}"') + + +def test_is_timedelta64_object(install_temp): + import checks + + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) + assert checks.is_td64(np.timedelta64("NaT", "ns")) + + assert not checks.is_td64(1) + assert not checks.is_td64(None) + assert not checks.is_td64("foo") + assert not checks.is_td64(np.datetime64("now", "s")) + + +def test_is_datetime64_object(install_temp): + import checks + + assert checks.is_dt64(np.datetime64(1234, "ns")) + assert checks.is_dt64(np.datetime64("NaT", "ns")) + + assert not checks.is_dt64(1) + assert not checks.is_dt64(None) + assert not checks.is_dt64("foo") + assert not checks.is_dt64(np.timedelta64(1234)) + + +def test_get_datetime64_value(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + + result = checks.get_dt64_value(dt64) + expected = dt64.view("i8") + + assert result == expected + + +def test_get_timedelta64_value(install_temp): + import checks + + td64 = np.timedelta64(12345, "h") + + result = checks.get_td64_value(td64) + expected = td64.view("i8") + + assert result == expected + + +def test_get_datetime64_unit(install_temp): + import checks + + dt64 = 
np.datetime64("2016-01-01", "ns") + result = checks.get_dt64_unit(dt64) + expected = 10 + assert result == expected + + td64 = np.timedelta64(12345, "h") + result = checks.get_dt64_unit(td64) + expected = 5 + assert result == expected diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index c9a65cd9c..73aa01de6 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -1091,6 +1091,40 @@ class TestFromDTypeAttribute: with pytest.raises(RecursionError): np.dtype(dt(1)) + +class TestDTypeClasses: + @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational]) + def test_basic_dtypes_subclass_properties(self, dtype): + # Note: Except for the isinstance and type checks, these attributes + # are considered currently private and may change. + dtype = np.dtype(dtype) + assert isinstance(dtype, np.dtype) + assert type(dtype) is not np.dtype + assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]" + assert type(dtype).__module__ == "numpy" + assert not type(dtype)._abstract + + # the flexible dtypes and datetime/timedelta have additional parameters + # which are more than just storage information, these would need to be + # given when creating a dtype: + parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64) + if dtype.type not in parametric: + assert not type(dtype)._parametric + assert type(dtype)() is dtype + else: + assert type(dtype)._parametric + with assert_raises(TypeError): + type(dtype)() + + def test_dtype_superclass(self): + assert type(np.dtype) is not type + assert isinstance(np.dtype, type) + + assert type(np.dtype).__name__ == "_DTypeMeta" + assert type(np.dtype).__module__ == "numpy" + assert np.dtype._abstract + + class TestFromCTypes: @staticmethod diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index a698370b6..1a8268eb8 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -845,6 +845,13 @@ class TestCreation: assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) + @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8, + reason="malloc may not fail on 32 bit systems") + def test_malloc_fails(self): + # This test is guaranteed to fail due to a too large allocation + with assert_raises(np.core._exceptions._ArrayMemoryError): + np.empty(np.iinfo(np.intp).max, dtype=np.uint8) + def test_zeros(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: @@ -929,7 +936,7 @@ class TestCreation: d = np.empty(i, dtype='U') str(d) - def test_sequence_non_homogenous(self): + def test_sequence_non_homogeneous(self): assert_equal(np.array([4, 2**80]).dtype, object) assert_equal(np.array([4, 2**80, 4]).dtype, object) assert_equal(np.array([2**80, 4]).dtype, object) @@ -988,7 +995,22 @@ class TestCreation: def __len__(self): return 42 - assert_raises(ValueError, np.array, C()) # segfault? + a = np.array(C()) # segfault? + assert_equal(len(a), 0) + + def test_false_len_iterable(self): + # Special case where a bad __getitem__ makes us fall back on __iter__: + class C: + def __getitem__(self, x): + raise Exception + def __iter__(self): + return iter(()) + def __len__(self): + return 2 + + a = np.empty(2) + with assert_raises(ValueError): + a[:] = C() # Segfault! 
def test_failed_len_sequence(self): # gh-7393 @@ -1804,7 +1826,7 @@ class TestMethods: c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) - + def test_sort_structured(self): # test record array sorts. dt = np.dtype([('f', float), ('i', int)]) @@ -5867,70 +5889,6 @@ class TestDot: assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) - def test_accelerate_framework_sgemv_fix(self): - - def aligned_array(shape, align, dtype, order='C'): - d = dtype(0) - N = np.prod(shape) - tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) - address = tmp.__array_interface__["data"][0] - for offset in range(align): - if (address + offset) % align == 0: - break - tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) - return tmp.reshape(shape, order=order) - - def as_aligned(arr, align, dtype, order='C'): - aligned = aligned_array(arr.shape, align, dtype, order) - aligned[:] = arr[:] - return aligned - - def assert_dot_close(A, X, desired): - assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) - - m = aligned_array(100, 15, np.float32) - s = aligned_array((100, 100), 15, np.float32) - np.dot(s, m) # this will always segfault if the bug is present - - testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) - for align, m, n, a_order in testdata: - # Calculation in double precision - A_d = np.random.rand(m, n) - X_d = np.random.rand(n) - desired = np.dot(A_d, X_d) - # Calculation with aligned single precision - A_f = as_aligned(A_d, align, np.float32, order=a_order) - X_f = as_aligned(X_d, align, np.float32) - assert_dot_close(A_f, X_f, desired) - # Strided A rows - A_d_2 = A_d[::2] - desired = np.dot(A_d_2, X_d) - A_f_2 = A_f[::2] - assert_dot_close(A_f_2, X_f, desired) - # Strided A columns, strided X vector - A_d_22 = A_d_2[:, ::2] - X_d_2 = X_d[::2] - desired = np.dot(A_d_22, X_d_2) - A_f_22 = A_f_2[:, ::2] - X_f_2 = X_f[::2] - assert_dot_close(A_f_22, X_f_2, desired) - # Check the strides are as expected - if a_order == 'F': - assert_equal(A_f_22.strides, (8, 8 * m)) - else: - assert_equal(A_f_22.strides, (8 * n, 8)) - assert_equal(X_f_2.strides, (8,)) - # Strides in A rows + cols only - X_f_2c = as_aligned(X_f_2, align, np.float32) - assert_dot_close(A_f_22, X_f_2c, desired) - # Strides just in A cols - A_d_12 = A_d[:, ::2] - desired = np.dot(A_d_12, X_d_2) - A_f_12 = A_f[:, ::2] - assert_dot_close(A_f_12, X_f_2c, desired) - # Strides in A cols and X - assert_dot_close(A_f_12, X_f_2, desired) - class MatmulCommon: """Common tests for '@' operator and numpy.matmul. 
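The next hunk adds tests for the np.resize rewrite shown earlier in this diff. Instead of computing copy counts and a remainder, the new code ravels the input, repeats it a ceil-divided number of times, truncates, and reshapes. A minimal standalone sketch of that logic (resize_sketch is an illustrative name, not NumPy API; input validation omitted):

    import numpy as np

    def resize_sketch(a, new_shape):
        # mirrors the new np.resize: tile the raveled input, then truncate
        a = np.ravel(a)
        new_size = int(np.prod(new_shape))
        if a.size == 0 or new_size == 0:
            return np.zeros_like(a, shape=new_shape)
        repeats = -(-new_size // a.size)    # ceil division
        return np.concatenate((a,) * repeats)[:new_size].reshape(new_shape)

    resize_sketch(np.array([1, 2, 3]), (2, 4))
    # -> array([[1, 2, 3, 1],
    #           [2, 3, 1, 2]])   (matches the test_repeats expectation below)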
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index acd442e2f..2a87ffaf8 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -31,6 +31,17 @@ class TestResize: Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) assert_equal(np.resize(A, (4, 3)), Ar3) + def test_repeats(self): + A = np.array([1, 2, 3]) + Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + def test_zeroresize(self): A = np.array([[1, 2], [3, 4]]) Ar = np.resize(A, (0,)) @@ -50,6 +61,23 @@ class TestResize: assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) assert_equal(A.dtype, Ar.dtype) + def test_negative_resize(self): + A = np.arange(0, 10, dtype=np.float32) + new_shape = (-10, -1) + with pytest.raises(ValueError, match=r"negative"): + np.resize(A, new_shape=new_shape) + + def test_subclass(self): + class MyArray(np.ndarray): + __array_priority__ = 1. + + my_arr = np.array([1]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + assert type(np.resize(my_arr, 0)) is MyArray + + my_arr = np.array([]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + class TestNonarrayArgs: # check that non-array arguments to functions wrap them in arrays diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py index 1475a5e24..8f6436004 100644 --- a/numpy/distutils/command/autodist.py +++ b/numpy/distutils/command/autodist.py @@ -69,7 +69,10 @@ def check_gcc_function_attribute(cmd, attribute, name): #pragma GCC diagnostic error "-Wattributes" #pragma clang diagnostic error "-Wattributes" - int %s %s(void*); + int %s %s(void* unused) + { + return 0; + } int main() diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 078b8fb59..d53285c92 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -520,7 +520,7 @@ class build_ext (old_build_ext): # Wrap unlinkable objects to a linkable one if unlinkable_fobjects: - fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects] + fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] wrapped = fcompiler.wrap_unlinkable_objects( fobjects, output_dir=self.build_temp, extra_dll_dir=self.extra_dll_dir) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 3a6a7b29d..df82683dc 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -362,6 +362,22 @@ default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] so_ext = get_shared_lib_extension() +def is_symlink_to_accelerate(filename): + accelpath = '/System/Library/Frameworks/Accelerate.framework' + return (sys.platform == 'darwin' and os.path.islink(filename) and + os.path.realpath(filename).startswith(accelpath)) + + +_accel_msg = ( + 'Found {filename}, but that file is a symbolic link to the ' + 'MacOS Accelerate framework, which is not supported by NumPy. ' + 'You must configure the build to use a different optimized library, ' + 'or disable the use of optimized BLAS and LAPACK by setting the ' + 'environment variables NPY_BLAS_ORDER="" and NPY_LAPACK_ORDER="" ' + 'before building NumPy.' 
+) + + def get_standard_file(fname): """Returns a list of files named 'fname' from 1) System-wide directory (directory-location of this module) @@ -427,7 +443,6 @@ def get_info(name, notfound_action=0): 'blis': blis_info, # use blas_opt instead 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'accelerate': accelerate_info, # use blas_opt instead 'openblas64_': openblas64__info, 'openblas64__lapack': openblas64__lapack_info, 'openblas_ilp64': openblas_ilp64_info, @@ -919,6 +934,9 @@ class system_info: for prefix in lib_prefixes: p = self.combine_paths(lib_dir, prefix + lib + ext) if p: + # p[0] is the full path to the binary library file. + if is_symlink_to_accelerate(p[0]): + raise RuntimeError(_accel_msg.format(filename=p[0])) break if p: assert len(p) == 1 @@ -1650,8 +1668,8 @@ def get_atlas_version(**config): class lapack_opt_info(system_info): notfounderror = LapackNotFoundError - # List of all known BLAS libraries, in the default order - lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack'] + # List of all known LAPACK libraries, in the default order + lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'lapack'] order_env_var_name = 'NPY_LAPACK_ORDER' def _calc_info_mkl(self): @@ -1823,7 +1841,7 @@ class lapack64__opt_info(lapack_ilp64_opt_info): class blas_opt_info(system_info): notfounderror = BlasNotFoundError # List of all known BLAS libraries, in the default order - blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas'] + blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'blas'] order_env_var_name = 'NPY_BLAS_ORDER' def _calc_info_mkl(self): diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py index 16a3b9241..31130559b 100644 --- a/numpy/doc/glossary.py +++ b/numpy/doc/glossary.py @@ -169,7 +169,7 @@ Glossary Collapsed to a one-dimensional array. See `numpy.ndarray.flatten` for details. - homogenous + homogeneous Describes a block of memory comprised of blocks, each block comprised of items and of the same size, and blocks are interpreted in exactly the same way. In the simplest case each block contains a single item, for diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 0b23dbebd..48b0a0830 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1332,6 +1332,10 @@ def interp(x, xp, fp, left=None, right=None, period=None): If `xp` or `fp` are not 1-D sequences If `period == 0` + See Also + -------- + scipy.interpolate + Notes ----- The x-coordinate sequence is expected to be increasing, but this is not @@ -3869,15 +3873,20 @@ def _quantile_is_valid(q): return True +def _lerp(a, b, t, out=None): + """ Linearly interpolate from a to b by a factor of t """ + return add(a*(1 - t), b*t, out=out) + + def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) - if q.ndim == 0: - # Do not allow 0-d arrays because following code fails for scalar - zerod = True - q = q[None] - else: - zerod = False + + # ufuncs cause 0d array results to decay to scalars (see gh-13105), which + # makes them problematic for __setitem__ and attribute access. As a + # workaround, we call this on the result of every ufunc on a possibly-0d + # array. 
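+    # For example, np.array(0.5) * 2 returns a np.float64 scalar rather than +    # a 0d array, and scalars do not support item assignment.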
+    not_scalar = np.asanyarray # prepare a for partitioning if overwrite_input: @@ -3894,9 +3903,14 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, if axis is None: axis = 0 - Nx = ap.shape[axis] - indices = q * (Nx - 1) + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as they + # were before. + raise ValueError("q must be a scalar or 1d") + Nx = ap.shape[axis] + indices = not_scalar(q * (Nx - 1)) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) @@ -3913,74 +3927,60 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, "interpolation can only be 'linear', 'lower', 'higher', " "'midpoint', or 'nearest'") - n = np.array(False, dtype=bool) # check for nan's flag - if np.issubdtype(indices.dtype, np.integer): # take the points along axis - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices = concatenate((indices, [-1])) + # The dimensions of `q` are prepended to the output shape, so we need the + # axis being sampled from `ap` to be first. + ap = np.moveaxis(ap, axis, 0) + del axis - ap.partition(indices, axis=axis) - # ensure axis with q-th is first - ap = np.moveaxis(ap, axis, 0) - axis = 0 + if np.issubdtype(indices.dtype, np.integer): + # take the points along axis - # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): - indices = indices[:-1] - n = np.isnan(ap[-1:, ...]) - - if zerod: - indices = indices[0] - r = take(ap, indices, axis=axis, out=out) - - else: # weight the points above and below the indices - indices_below = floor(indices).astype(intp) - indices_above = indices_below + 1 - indices_above[indices_above > Nx - 1] = Nx - 1 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices_above = concatenate((indices_above, [-1])) + # may contain nan, which would sort to the end + ap.partition(concatenate((indices.ravel(), [-1])), axis=0) + n = np.isnan(ap[-1]) + else: + # cannot contain nan + ap.partition(indices.ravel(), axis=0) + n = np.array(False, dtype=bool) - ap.partition(concatenate((indices_below, indices_above)), axis=axis) + r = take(ap, indices, axis=0, out=out) - # ensure axis with q-th is first - ap = np.moveaxis(ap, axis, 0) - axis = 0 + else: + # weight the points above and below the indices - weights_shape = [1] * ap.ndim - weights_shape[axis] = len(indices) - weights_above = (indices - indices_below).reshape(weights_shape) + indices_below = not_scalar(floor(indices)).astype(intp) + indices_above = not_scalar(indices_below + 1) + indices_above[indices_above > Nx - 1] = Nx - 1 - # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): - indices_above = indices_above[:-1] - n = np.isnan(ap[-1:, ...]) + # may contain nan, which would sort to the end + ap.partition(concatenate(( + indices_below.ravel(), indices_above.ravel(), [-1] + )), axis=0) + n = np.isnan(ap[-1]) + else: + # cannot contain nan + ap.partition(concatenate(( + indices_below.ravel(), indices_above.ravel() + )), axis=0) + n = np.array(False, dtype=bool) - x1 = take(ap, indices_below, axis=axis) * (1 - weights_above) - x2 = take(ap, indices_above, axis=axis) * weights_above + weights_shape = indices.shape + (1,) * (ap.ndim - 1) + weights_above = not_scalar(indices - indices_below).reshape(weights_shape) - if zerod: - x1 = x1.squeeze(0) - x2 = x2.squeeze(0) +
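+    # Gather the two bracketing values and blend them: _lerp(a, b, t) +    # computes a*(1 - t) + b*t, writing into `out` when one is given.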
x_below = take(ap, indices_below, axis=0) + x_above = take(ap, indices_above, axis=0) - r = add(x1, x2, out=out) + r = _lerp(x_below, x_above, weights_above, out=out) + # if any slice contained a nan, then all results on that slice are also nan if np.any(n): - if zerod: - if ap.ndim == 1: - if out is not None: - out[...] = a.dtype.type(np.nan) - r = out - else: - r = a.dtype.type(np.nan) - else: - r[..., n.squeeze(0)] = a.dtype.type(np.nan) + if r.ndim == 0 and out is None: + # can't write to a scalar + r = a.dtype.type(np.nan) else: - if r.ndim == 1: - r[:] = a.dtype.type(np.nan) - else: - r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) + r[..., n] = a.dtype.type(np.nan) return r diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index b4e928273..9ba0be56a 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2423,6 +2423,15 @@ class TestBincount: assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + @pytest.mark.parametrize("vals", [[[2, 2]], 2]) + def test_error_not_1d(self, vals): + # Test that values has to be 1-D (both as array and nested list) + vals_arr = np.asarray(vals) + with assert_raises(ValueError): + np.bincount(vals_arr) + with assert_raises(ValueError): + np.bincount(vals) + class TestInterp: @@ -3350,12 +3359,50 @@ class TestAdd_newdoc: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_doc(self): - # test np.add_newdoc + # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the array." assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) assert_(len(np.core.ufunc.identity.__doc__) > 300) assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_errors_are_ignored(self): + prev_doc = np.core.flatiter.index.__doc__ + # nothing changed, but the error is ignored; this should probably + # give a warning (or even an error) in the future. + np.add_newdoc("numpy.core", "flatiter", ("index", "bad docstring")) + assert prev_doc == np.core.flatiter.index.__doc__ + + +class TestAddDocstring: + # These tests could be moved elsewhere, but they fit naturally next to + # the newdoc tests.
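+    # np.add_docstring attaches a docstring to a C-level object at most once: +    # re-adding the identical text succeeds as a no-op, while a different +    # text raises RuntimeError (both cases are exercised below).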
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_same_docstring(self): + # test for attributes (which are C-level defined) + np.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) + # And typical functions: + def func(): + """docstring""" + return + + np.add_docstring(func, func.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_different_docstring_fails(self): + # test for attributes (which are C-level defined) + with assert_raises(RuntimeError): + np.add_docstring(np.ndarray.flat, "different docstring") + # And typical functions: + def func(): + """docstring""" + return + + with assert_raises(RuntimeError): + np.add_docstring(func, "different docstring") + + class TestSortComplex: @pytest.mark.parametrize("type_in, type_out", [ diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index c96bf795a..261cfef5d 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -49,7 +49,7 @@ def old_func5(self, x): Bizarre indentation. """ return x -new_func5 = deprecate(old_func5) +new_func5 = deprecate(old_func5, message="This function is\ndeprecated.") def old_func6(self, x): @@ -74,10 +74,20 @@ def test_deprecate_fn(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") -def test_deprecate_help_indentation(): - _compare_docs(old_func4, new_func4) - _compare_docs(old_func5, new_func5) - _compare_docs(old_func6, new_func6) +@pytest.mark.parametrize('old_func, new_func', [ + (old_func4, new_func4), + (old_func5, new_func5), + (old_func6, new_func6), +]) +def test_deprecate_help_indentation(old_func, new_func): + _compare_docs(old_func, new_func) + # Ensure we don't mess up the indentation + for knd, func in (('old', old_func), ('new', new_func)): + for li, line in enumerate(func.__doc__.split('\n')): + if li == 0: + assert line.startswith('    ') or not line.startswith(' '), knd + elif line: + assert line.startswith('    '), knd def _compare_docs(old_func, new_func): diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index f233c7240..d511c2a40 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -1,5 +1,6 @@ import os import sys +import textwrap import types import re import warnings @@ -9,9 +10,6 @@ from numpy.core.overrides import set_module from numpy.core import ndarray, ufunc, asarray import numpy as np -# getargspec and formatargspec were removed in Python 3.6 -from numpy.compat import getargspec, formatargspec - __all__ = [ 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', @@ -117,6 +115,7 @@ class _Deprecate: break skip += len(line) + 1 doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) doc = '\n\n'.join([depdoc, doc]) newfunc.__doc__ = doc try: @@ -552,9 +551,12 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): file=output ) - elif inspect.isfunction(object): + elif inspect.isfunction(object) or inspect.ismethod(object): name = object.__name__ - arguments = formatargspec(*getargspec(object)) + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" if len(name+arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) @@ -566,18 +568,10 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): elif inspect.isclass(object): name = object.__name__ - arguments = "()" try: - if
hasattr(object, '__init__'): - arguments = formatargspec( - *getargspec(object.__init__.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) + arguments = str(inspect.signature(object)) except Exception: - pass + arguments = "()" if len(name+arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) @@ -605,26 +599,6 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): ) print(" %s -- %s" % (meth, methstr), file=output) - elif inspect.ismethod(object): - name = object.__name__ - arguments = formatargspec( - *getargspec(object.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - print(inspect.getdoc(object), file=output) - elif hasattr(object, '__doc__'): print(inspect.getdoc(object), file=output) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 57fdd502b..bb070ed9d 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -3,11 +3,17 @@ import sys def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info, system_info + from numpy.distutils.system_info import ( + get_info, system_info, lapack_opt_info, blas_opt_info) config = Configuration('linalg', parent_package, top_path) config.add_subpackage('tests') + # Accelerate is buggy; disallow it. See also numpy/core/setup.py + for opt_order in (blas_opt_info.blas_order, lapack_opt_info.lapack_order): + if 'accelerate' in opt_order: + opt_order.remove('accelerate') + # Configure lapack_lite src_dir = 'lapack_lite' diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 860832be8..61ae91b00 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -1,8 +1,12 @@ from numpy.testing import assert_raises, assert_, assert_equal from numpy.compat import pickle +import sys +import subprocess +import textwrap from importlib import reload + def test_numpy_reloading(): # gh-7844. Also check that relevant globals retain their identity. import numpy as np @@ -29,3 +33,25 @@ def test_novalue(): assert_equal(repr(np._NoValue), '<no value>') assert_(pickle.loads(pickle.dumps(np._NoValue, protocol=proto)) is np._NoValue) + + +def test_full_reimport(): + """At the time of writing this, a full reimport is *not* truly supported, + but apparently enough users rely on it for it to be an annoying change + when it started failing previously. + """ + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially since we also reload the C-modules.
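+    # The child process drops every cached numpy module from sys.modules and +    # then imports numpy again, exercising C-module initialization twice.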
+ code = textwrap.dedent(r""" + import sys + import numpy as np + + for k in list(sys.modules.keys()): + if "numpy" in k: + del sys.modules[k] + + import numpy as np + """) + p = subprocess.run([sys.executable, '-c', code]) + assert p.returncode == 0 @@ -179,7 +179,7 @@ def check_submodules(): if 'path' in l: p = l.split('=')[-1].strip() if not os.path.exists(p): - raise ValueError(f'Submodule {p} missing') + raise ValueError('Submodule {} missing'.format(p)) proc = subprocess.Popen(['git', 'submodule', 'status'], @@ -188,7 +188,7 @@ def check_submodules(): status = status.decode("ascii", "replace") for line in status.splitlines(): if line.startswith('-') or line.startswith('+'): - raise ValueError(f'Submodule not clean: {line}') + raise ValueError('Submodule not clean: {}'.format(line)) diff --git a/test_requirements.txt b/test_requirements.txt index 607fabe1e..ffb27d7ec 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,8 +1,8 @@ -cython==0.29.17 -hypothesis==5.14.0 +cython==0.29.19 +hypothesis==5.15.1 pytest==5.4.2 pytz==2020.1 -pytest-cov==2.8.1 +pytest-cov==2.9.0 pickle5; python_version == '3.7' pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending diff --git a/tools/download-wheels.py b/tools/download-wheels.py index 276fcc6b2..941440ca9 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -16,7 +16,7 @@ __version__ = '0.1' # Edit these for other projects. STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/numpy' -PREFIX = '^.*numpy-' +PREFIX = 'numpy' def get_wheel_names(version): """ Get wheel names from Anaconda HTML directory. @@ -31,8 +31,8 @@ def get_wheel_names(version): """ http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') - tmpl = re.compile(PREFIX + version + '.*\.whl$') - index_url = f"{STAGING_URL}/files" + tmpl = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$") + index_url = f"{STAGING_URL}/files" index_html = http.request('GET', index_url) soup = BeautifulSoup(index_html.data, 'html.parser') return soup.findAll(text=tmpl) @@ -54,13 +54,15 @@ def download_wheels(version, wheelhouse): """ http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') wheel_names = get_wheel_names(version) - for wheel_name in wheel_names: + + for i, wheel_name in enumerate(wheel_names): wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}" wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, 'wb') as f: with http.request('GET', wheel_url, preload_content=False,) as r: - print(f"Downloading {wheel_name}") + print(f"{i + 1:<4}{wheel_name}") shutil.copyfileobj(r, f) + print(f"\nTotal files downloaded: {len(wheel_names)}") if __name__ == '__main__': @@ -77,4 +79,9 @@ if __name__ == '__main__': args = parser.parse_args() wheelhouse = os.path.expanduser(args.wheelhouse) + if not os.path.isdir(wheelhouse): + raise RuntimeError( + f"{wheelhouse} wheelhouse directory is not present." + " Perhaps you need to use the '-w' flag to specify one.") + download_wheels(args.version, wheelhouse) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 6b2ad0f8c..cbb6a5e43 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -207,12 +207,12 @@ def make_init(dirname): and is created as part of the scripts that build the wheel. 
''' import os - from ctypes import WinDLL import glob if os.name == 'nt': # convention for storing / loading the DLL from # numpy/.libs/, if present try: + from ctypes import WinDLL basedir = os.path.dirname(__file__) except: pass @@ -221,16 +221,16 @@ def make_init(dirname): DLL_filenames = [] if os.path.isdir(libs_dir): for filename in glob.glob(os.path.join(libs_dir, - '*openblas*dll')): + '*openblas*dll')): # NOTE: would it change behavior to load ALL # DLLs at this path vs. the name restriction? WinDLL(os.path.abspath(filename)) DLL_filenames.append(filename) - if len(DLL_filenames) > 1: - import warnings - warnings.warn("loaded more than 1 DLL from .libs:\\n%s" % - "\\n".join(DLL_filenames), - stacklevel=1) + if len(DLL_filenames) > 1: + import warnings + warnings.warn("loaded more than 1 DLL from .libs:\\n%s" % + "\\n".join(DLL_filenames), + stacklevel=1) """)) def test_setup(arches): diff --git a/tools/travis-upload-wheel.sh b/tools/travis-upload-wheel.sh deleted file mode 100755 index 06a8f3eba..000000000 --- a/tools/travis-upload-wheel.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# -set -ex - -export CLOUD_CONTAINER_NAME=travis-dev-wheels - -if [[ ( ${USE_WHEEL} == 1 ) \ - && ( "${TRAVIS_BRANCH}" == "master" ) \ - && ( "${TRAVIS_PULL_REQUEST}" == "false" ) ]]; then - pip install wheelhouse_uploader - python -m wheelhouse_uploader upload --local-folder \ - ${TRAVIS_BUILD_DIR}/dist/ ${CLOUD_CONTAINER_NAME} -fi
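To make the new ``_lerp``-based path concrete, the following minimal sketch (our own illustration, using a full sort where the real ``_quantile_ureduce_func`` partitions) reproduces what ``interpolation='linear'`` computes for a 1-D array and a scalar ``q``::

    import numpy as np

    def linear_quantile(a, q):
        # Sort, then linearly interpolate between the two order statistics
        # that bracket the fractional index q * (N - 1).
        ap = np.sort(np.asarray(a, dtype=float))
        index = q * (ap.size - 1)
        below = int(np.floor(index))
        above = min(below + 1, ap.size - 1)
        t = index - below                            # weight of the upper neighbour
        return ap[below] * (1 - t) + ap[above] * t   # _lerp(below, above, t)

    data = [3.0, 1.0, 2.0, 4.0]
    assert linear_quantile(data, 0.5) == np.quantile(data, 0.5) == 2.5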