diff options
37 files changed, 1098 insertions, 648 deletions
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 1c028542d..f302f262d 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -93,6 +93,12 @@ class Core(Benchmark): def time_tril_l10x10(self): np.tril(self.l10x10) + def time_triu_indices_500(self): + np.triu_indices(500) + + def time_tril_indices_500(self): + np.tril_indices(500) + class Temporaries(Benchmark): def setup(self): diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index c8bd7c180..af5bdab29 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -19,46 +19,222 @@ processes for individual cases where breaking backwards compatibility is considered. -Detailed description +Motivation and Scope -------------------- NumPy has a very large user base. Those users rely on NumPy being stable and the code they write that uses NumPy functionality to keep working. NumPy is also actively maintained and improved -- and sometimes improvements -require, or are made much easier, by breaking backwards compatibility. +require, or are made easier, by breaking backwards compatibility. Finally, there are trade-offs in stability for existing users vs. avoiding errors or having a better user experience for new users. These competing -needs often give rise to heated debates and delays in accepting or rejecting +needs often give rise to long debates and delay accepting or rejecting contributions. This NEP tries to address that by providing a policy as well as examples and rationales for when it is or isn't a good idea to break backwards compatibility. -General principles: - -- Aim not to break users' code unnecessarily. -- Aim never to change code in ways that can result in users silently getting - incorrect results from their previously working code. 
-- Backwards incompatible changes can be made, provided the benefits outweigh - the costs. -- When assessing the costs, keep in mind that most users do not read the mailing - list, do not look at deprecation warnings, and sometimes wait more than one or - two years before upgrading from their old version. And that NumPy has - many hundreds of thousands or even a couple of million users, so "no one will - do or use this" is very likely incorrect. -- Benefits include improved functionality, usability and performance (in order - of importance), as well as lower maintenance cost and improved future - extensibility. -- Bug fixes are exempt from the backwards compatibility policy. However in case - of serious impact on users (e.g. a downstream library doesn't build anymore), - even bug fixes may have to be delayed for one or more releases. -- The Python API and the C API will be treated in the same way. - - -Examples -^^^^^^^^ - -We now discuss a number of concrete examples to illustrate typical issues -and trade-offs. +In addition, this NEP can serve as documentation for users about how the NumPy +project treats backwards compatibility, and the speed at which they can expect +changes to be made. + +In scope for this NEP are: + +- Principles of NumPy's approach to backwards compatibility. +- How to deprecate functionality, and when to remove already deprecated + functionality. +- Decision making process for deprecations and removals. +- How to ensure that users are well informed about any change. + +Out of scope are: + +- Making concrete decisions about deprecations of particular functionality. +- NumPy's versioning scheme. + + +General principles +------------------ + +When considering proposed changes that are backwards incompatible, the +main principles the NumPy developers use when making a decision are: + +1. Changes need to benefit more than they harm users. +2. NumPy is widely used, so breaking changes should be assumed by default to be + harmful. +3. 
Decisions should be based on how they affect users and downstream packages + and should be based on usage data where possible. It does not matter whether + this use contradicts the documentation or best practices. +4. The possibility of an incorrect result is worse than an error or even crash. + +When assessing the costs of proposed changes, keep in mind that most users do +not read the mailing list, do not notice deprecation warnings, and sometimes +wait more than one or two years before upgrading from their old version. And +that NumPy has millions of users, so "no one will do or use this" is likely +incorrect. + +Benefits of proposed changes can include improved functionality, usability and +performance, as well as lower maintenance cost and improved future +extensibility. + +Fixes for clear bugs are exempt from this backwards compatibility policy. +However, in case of serious impact on users even bug fixes may have to be +delayed for one or more releases. For example, if a downstream library would no +longer build or would give incorrect results. + + +Strategies related to deprecations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Impact assessment +````````````````` + +Getting hard data on the impact of a deprecation is often difficult. Strategies +that can be used to assess such impact include: + +- Use a code search engine ([1]_, [2]_) or static ([3]_) or dynamic ([4]_) code + analysis tools to determine where and how the functionality is used. +- Test prominent downstream libraries against a development build of NumPy + containing the proposed change to get real-world data on its impact. +- Make a change in master and revert it before release if it causes problems. + We encourage other packages to test against NumPy's master branch and if + that's too burdensome, then at least to test pre-releases. This often + turns up issues quickly. 
+ +Alternatives to deprecations +```````````````````````````` + +If the impact is unclear or significant, it is often good to consider +alternatives to deprecations. For example, discouraging use in documentation +only, or moving the documentation for the functionality to a less prominent +place or even removing it completely. Commenting on open issues related to it +that they are low-prio or labeling them as "wontfix" will also be a signal to +users, and reduce the maintenance effort needing to be spent. + + +Implementing deprecations and removals +-------------------------------------- + +Deprecation warnings are necessary in all cases where functionality +will eventually be removed. If there is no intent to remove functionality, +then it should not be deprecated. A "please don't use this for new code" +in the documentation or other type of warning should be used instead, and the +documentation can be organized such that the preferred alternative is more +prominently shown. + +Deprecations: + +- shall include the version number of the release in which the functionality + was deprecated. +- shall include information on alternatives to the deprecated functionality, or a + reason for the deprecation if no clear alternative is available. Note that + release notes can include longer messages if needed. +- shall use ``DeprecationWarning`` by default, and ``VisibleDeprecationWarning`` + for changes that need attention again after already having been deprecated or + needing extra attention for some reason. +- shall be listed in the release notes of the release where the deprecation is + first present. +- shall not be introduced in micro (bug fix) releases. +- shall set a ``stacklevel``, so the warning appears to come from the correct + place. +- shall be mentioned in the documentation for the functionality. A + ``.. deprecated::`` directive can be used for this. 
+ +Examples of good deprecation warnings (also note standard form of the comments +above the warning, helps when grepping): + +.. code-block:: python + + # NumPy 1.15.0, 2018-09-02 + warnings.warn('np.asscalar(a) is deprecated since NumPy 1.16.0, use ' + 'a.item() instead', DeprecationWarning, stacklevel=3) + + # NumPy 1.15.0, 2018-02-10 + warnings.warn("Importing from numpy.testing.utils is deprecated " + "since 1.15.0, import from numpy.testing instead.", + DeprecationWarning, stacklevel=2) + + # NumPy 1.14.0, 2017-07-14 + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated since NumPy 1.14.0. Set the encoding, " + "use None for the system default.", + np.VisibleDeprecationWarning, stacklevel=2) + +.. code-block:: C + + /* DEPRECATED 2020-05-13, NumPy 1.20 */ + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + matrix_deprecation_msg, ufunc->name, "first") < 0) { + return NULL; + } + +Removal of deprecated functionality: + +- shall be done after at least 2 releases assuming the current 6-monthly + release cycle; if that changes, there shall be at least 1 year between + deprecation and removal. +- shall be listed in the release notes of the release where the removal happened. +- can be done in any minor, but not bugfix, release. + +For backwards incompatible changes that aren't "deprecate and remove" but for +which code will start behaving differently, a ``FutureWarning`` should be +used. Release notes, mentioning version number and using ``stacklevel`` should +be done in the same way as for deprecation warnings. A ``.. versionchanged::`` +directive shall be used in the documentation after the behaviour change was +made to indicate when the behavior changed: + +.. code-block:: python + + def argsort(self, axis=np._NoValue, ...): + """ + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + + .. 
versionchanged:: 1.13.0 + Previously, the default was documented to be -1, but that was + in error. At some future date, the default will change to -1, as + originally intended. + Until then, the axis should be given explicitly when + ``arr.ndim > 1``, to avoid a FutureWarning. + """ + ... + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + + +Decision making +--------------- + +In concrete cases where this policy needs to be applied, decisions are made according +to the `NumPy governance model +<https://docs.scipy.org/doc/numpy/dev/governance/index.html>`_. + +All deprecations must be proposed on the mailing list in order to give everyone +with an interest in NumPy development a chance to comment. Removal of +deprecated functionality does not need discussion on the mailing list. + + +Functionality with more strict deprecation policies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- ``numpy.random`` has its own backwards compatibility policy with additional + requirements on top of the ones in this NEP, see + `NEP 19 <http://www.numpy.org/neps/nep-0019-rng-policy.html>`_. +- The file format of ``.npy`` and ``.npz`` files is strictly versioned + independent of the NumPy version; existing format versions must remain + backwards compatible even if a newer format version is introduced. + + +Example cases +------------- + +We now discuss a few concrete examples from NumPy's history to illustrate +typical issues and trade-offs. **Changing the behavior of a function** @@ -89,21 +265,6 @@ forces users to change their code more than once, which is almost never the right thing to do. Instead, a better approach here would have been to deprecate ``histogram`` and introduce a new function ``hist`` in its place. 
-**Returning a view rather than a copy** - -The ``ndarray.diag`` method used to return a copy. A view would be better for -both performance and design consistency. This change was warned about -(``FutureWarning``) in v.8.0, and in v1.9.0 ``diag`` was changed to return -a *read-only* view. The planned change to a writeable view in v1.10.0 was -postponed due to backwards compatibility concerns, and is still an open issue -(gh-7661). - -What should have happened instead: nothing. This change resulted in a lot of -discussions and wasted effort, did not achieve its final goal, and was not that -important in the first place. Finishing the change to a *writeable* view in -the future is not desired, because it will result in users silently getting -different results if they upgraded multiple versions or simply missed the -warnings. **Disallowing indexing with floats** @@ -120,128 +281,30 @@ scikit-learn. Overall the change was worth the cost, and introducing it in master first to allow testing, then removing it again before a release, is a useful strategy. -Similar recent deprecations also look like good examples of +Similar deprecations that also look like good examples of cleanups/improvements: -- removing deprecated boolean indexing (gh-8312) -- deprecating truth testing on empty arrays (gh-9718) -- deprecating ``np.sum(generator)`` (gh-10670, one issue with this one is that - its warning message is wrong - this should error in the future). +- removing deprecated boolean indexing (in 2016, see `gh-8312 <https://github.com/numpy/numpy/pull/8312>`__) +- deprecating truth testing on empty arrays (in 2017, see `gh-9718 <https://github.com/numpy/numpy/pull/9718>`__) + **Removing the financial functions** -The financial functions (e.g. ``np.pmt``) are badly named, are present in the -main NumPy namespace, and don't really fit well within NumPy's scope. -They were added in 2008 after +The financial functions (e.g. 
``np.pmt``) had short non-descriptive names, were +present in the main NumPy namespace, and didn't really fit well within NumPy's +scope. They were added in 2008 after `a discussion <https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html>`_ on the mailing list where opinion was divided (but a majority in favor). -At the moment these functions don't cause a lot of overhead, however there are -multiple issues and PRs a year for them which cost maintainer time to deal -with. And they clutter up the ``numpy`` namespace. Discussion in 2013 happened -on removing them again (gh-2880). - -This case is borderline, but given that they're clearly out of scope, -deprecation and removal out of at least the main ``numpy`` namespace can be -proposed. Alternatively, document clearly that new features for financial -functions are unwanted, to keep the maintenance costs to a minimum. - -**Examples of features not added because of backwards compatibility** - -TODO: do we have good examples here? Possibly subclassing related? - - -Removing complete submodules -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This year there have been suggestions to consider removing some or all of -``numpy.distutils``, ``numpy.f2py``, ``numpy.linalg``, and ``numpy.random``. -The motivation was that all these cost maintenance effort, and that they slow -down work on the core of NumPy (ndarrays, dtypes and ufuncs). - -The impact on downstream libraries and users would be very large, and -maintenance of these modules would still have to happen. Therefore this is -simply not a good idea; removing these submodules should not happen even for -a new major version of NumPy. - - -Subclassing of ndarray -^^^^^^^^^^^^^^^^^^^^^^ - -Subclassing of ``ndarray`` is a pain point. ``ndarray`` was not (or at least -not well) designed to be subclassed. Despite that, a lot of subclasses have -been created even within the NumPy code base itself, and some of those (e.g. 
-``MaskedArray``, ``astropy.units.Quantity``) are quite popular. The main -problems with subclasses are: - -- They make it hard to change ``ndarray`` in ways that would otherwise be - backwards compatible. -- Some of them change the behavior of ndarray methods, making it difficult to - write code that accepts array duck-types. - -Subclassing ``ndarray`` has been officially discouraged for a long time. Of -the most important subclasses, ``np.matrix`` will be deprecated (see gh-10142) -and ``MaskedArray`` will be kept in NumPy (`NEP 17 -<http://www.numpy.org/neps/nep-0017-split-out-maskedarray.html>`_). -``MaskedArray`` will ideally be rewritten in a way such that it uses only -public NumPy APIs. For subclasses outside of NumPy, more work is needed to -provide alternatives (e.g. mixins, see gh-9016 and gh-10446) or better support -for custom dtypes (see gh-2899). Until that is done, subclasses need to be -taken into account when making change to the NumPy code base. A future change -in NumPy to not support subclassing will certainly need a major version -increase. - - -Policy ------- - -1. Code changes that have the potential to silently change the results of a users' - code must never be made (except in the case of clear bugs). -2. Code changes that break users' code (i.e. the user will see a clear exception) - can be made, *provided the benefit is worth the cost* and suitable deprecation - warnings have been raised first. -3. Deprecation warnings are in all cases warnings that functionality will be removed. - If there is no intent to remove functionality, then deprecation in documentation - only or other types of warnings shall be used. -4. Deprecations for stylistic reasons (e.g. consistency between functions) are - strongly discouraged. - -Deprecations: - -- shall include the version numbers of both when the functionality was deprecated - and when it will be removed (either two releases after the warning is - introduced, or in the next major version). 
-- shall include information on alternatives to the deprecated functionality, or a - reason for the deprecation if no clear alternative is available. -- shall use ``VisibleDeprecationWarning`` rather than ``DeprecationWarning`` - for cases of relevance to end users (as opposed to cases only relevant to - libraries building on top of NumPy). -- shall be listed in the release notes of the release where the deprecation happened. - -Removal of deprecated functionality: +The financial functions didn't cause a lot of overhead, however there were +still multiple issues and PRs a year for them which cost maintainer time to +deal with. And they cluttered up the ``numpy`` namespace. Discussion on +removing them happened in 2013 (gh-2880, rejected) and in 2019 +(:ref:`NEP32`, accepted without significant complaints). -- shall be done after 2 releases (assuming a 6-monthly release cycle; if that changes, - there shall be at least 1 year between deprecation and removal), unless the - impact of the removal is such that a major version number increase is - warranted. -- shall be listed in the release notes of the release where the removal happened. - -Versioning: - -- removal of deprecated code can be done in any minor (but not bugfix) release. - -- for heavily used functionality (e.g. removal of ``np.matrix``, of a whole submodule, - or significant changes to behavior for subclasses) the major version number shall - be increased. - -In concrete cases where this policy needs to be applied, decisions are made according -to the `NumPy governance model -<https://docs.scipy.org/doc/numpy/dev/governance/index.html>`_. - -Functionality with more strict policies: - -- ``numpy.random`` has its own backwards compatibility policy, - see `NEP 19 <http://www.numpy.org/neps/nep-0019-rng-policy.html>`_. -- The file format for ``.npy`` and ``.npz`` files must not be changed in a backwards - incompatible way. 
+Given that they were clearly outside of NumPy's scope, moving them to a +separate ``numpy-financial`` package and removing them from NumPy after a +deprecation period made sense. That also gave users an easy way to update +their code by doing `pip install numpy-financial`. Alternatives @@ -257,34 +320,29 @@ ecosystem - being fairly conservative is required in order to not increase the extra maintenance for downstream libraries and end users to an unacceptable level. -**Semantic versioning.** - -This would change the versioning scheme for code removals; those could then -only be done when the major version number is increased. Rationale for -rejection: semantic versioning is relatively common in software engineering, -however it is not at all common in the Python world. Also, it would mean that -NumPy's version number simply starts to increase faster, which would be more -confusing than helpful. gh-10156 contains more discussion on this alternative. - Discussion ---------- -TODO - -This section may just be a bullet list including links to any discussions -regarding the NEP: - -- This includes links to mailing list threads or relevant GitHub issues. +- `Mailing list discussion on the first version of this NEP in 2018 <https://mail.python.org/pipermail/numpy-discussion/2018-July/078432.html>`__ References and Footnotes ------------------------ -.. [1] TODO +- `Issue requesting semantic versioning <https://github.com/numpy/numpy/issues/10156>`__ + +- `PEP 387 - Backwards Compatibility Policy <https://www.python.org/dev/peps/pep-0387/>`__ + +.. [1] https://searchcode.com/ + +.. [2] https://sourcegraph.com/search + +.. [3] https://github.com/Quansight-Labs/python-api-inspect +.. [4] https://github.com/data-apis/python-record-api Copyright --------- -This document has been placed in the public domain. [1]_ +This document has been placed in the public domain. 
diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst new file mode 100644 index 000000000..cc273ce2c --- /dev/null +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -0,0 +1,255 @@ +.. _NEP46: + +===================================== +NEP 46 — NumPy Sponsorship Guidelines +===================================== + +:Author: Ralf Gommers <ralf.gommers@gmail.com> +:Status: Draft +:Type: Process +:Created: 2020-12-27 +:Resolution: <url> (required for Accepted | Rejected | Withdrawn) + + +Abstract +-------- + +This NEP provides guidelines on how the NumPy project will acknowledge +financial and in-kind support. + + +Motivation and Scope +-------------------- + +In the past few years, the NumPy project has gotten significant financial +support, as well as dedicated work time for maintainers to work on NumPy. There +is a need to acknowledge that support - it's the right thing to do, it's +helpful when looking for new funding, and funders and organizations expect or +require it. Furthermore, having a clear policy for how NumPy acknowledges +support is helpful when searching for new support. Finally, this policy may +help set reasonable expectations for potential funders. + +This NEP is aimed at both the NumPy community - who can use it as a guideline +when looking for support on behalf of the project and when acknowledging +existing support - and at past, current and prospective sponsors, who often +want or need to know what they get in return for their support other than a +healthier NumPy. 
+ +The scope of this proposal includes: + +- direct financial support, employers providing paid time for NumPy maintainers + and regular contributors, and in-kind support such as free hardware resources or + services, +- where and how NumPy acknowledges support (e.g., logo placement on the website), +- the amount and duration of support which leads to acknowledgement, and +- who in the NumPy project is responsible for sponsorship related topics, and + how to contact them. + + +How NumPy will acknowledge support +---------------------------------- + +There will be two different ways to acknowledge financial and in-kind support: +one to recognize significant active support, and another one to recognize +support received in the past and smaller amounts of support. + +Entities who fall under "significant active supporter" we'll call Sponsor. +The minimum level of support given to NumPy to be considered a Sponsor are: + +- $30,000/yr for unrestricted financial contributions (e.g., donations) +- $60,000/yr for financial contributions for a particular purpose (e.g., grants) +- $100,000/yr for in-kind contributions (e.g., time for employees to contribute) + +We define support being active as: + +- for a one-off donation: it was received within the previous 12 months, +- for recurring or financial or in-kind contributions: they should be ongoing. + +After support moves from "active" to "inactive" status, the acknowledgement +will be left in its place for at least another 6 months. If appropriate, the +funding team can discuss opportunities for renewal with the sponsor. After +those 6 months, acknowledgement may be moved to the historical overview. The +exact timing of this move is at the discretion of the funding team, because +there may be reasons to keep it in the more prominent place for longer. + +The rationale for the above funding levels is that unrestricted financial +contributions are typically the most valuable for the project, and the hardest +to obtain. 
The opposite is true for in-kind contributions. The dollar value of +the levels also reflect that NumPy's needs have grown to the point where we +need multiple paid developers in order to effectively support our user base and +continue to move the project forward. Financial support at or above these +levels is needed to be able to make a significant difference. + +Sponsors will get acknowledged through: + +- a small logo displayed on the front page of the NumPy website +- prominent logo placement on https://numpy.org/about/ +- logos displayed in talks about NumPy by maintainers +- announcements of the sponsorship on the NumPy mailing list and the numpy-team + Twitter account + +In addition to Sponsors, we already have the concept of Institutional Partner +(defined in NumPy's +`governance document <https://numpy.org/devdocs/dev/governance/index.html>`__), +for entities who employ a NumPy maintainer and let them work on NumPy as part +of their official duties. The governance document doesn't currently define a +minimum amount of paid maintainer time needed to be considered for partnership. +Therefore we propose that level here, roughly in line with the sponsorship +levels: + +- 6 person-months/yr of paid work time for one or more NumPy maintainers or + regular contributors to any NumPy team or activity + +Institutional Partners get the same benefits as Sponsors, in addition to what +is specified in the NumPy governance document. + +Finally, a new page on the website (https://numpy.org/funding/, linked from the +About page) will be added to acknowledge all current and previous sponsors, +partners, and any other entities and individuals who provided $5,000 or more of +financial or in-kind support. This page will include relevant details of +support (dates, amounts, names, and purpose); no logos will be used on this +page. 
The rationale for the $5,000 minimum level is to keep the amount of work +maintaining the page reasonable; the level is the equivalent of, e.g., one GSoC +or a person-week's worth of engineering time in a Western country, which seems +like a reasonable lower limit. + + +Implementation +-------------- + +The following content changes need to be made: + +- Add a section with small logos towards the bottom of the `numpy.org + <https://numpy.org/>`__ website. +- Create a full list of historical and current support and deploy it to + https://numpy.org/funding. +- Update the NumPy governance document for changes to Institutional Partner + eligibility requirements and benefits. +- Update https://numpy.org/about with details on how to get in touch with the + NumPy project about sponsorship related matters (see next section). + + +NumPy Funding Team +~~~~~~~~~~~~~~~~~~ + +At the moment NumPy has only one official body, the Steering Council, and no +good way to get in touch with either that body or any person or group +responsible for funding and sponsorship related matters. The way this is +typically done now is to somehow find the personal email of a maintainer, and +email them in private. There is a need to organize this more transparently - a +potential sponsor isn't likely to inquire through the mailing list, nor is it +easy for a potential sponsor to know if they're reaching out to the right +person in private. + +https://numpy.org/about/ already says that NumPy has a "funding and grants" +team. However that is not the case. We propose to organize this team, name team +members on it, and add the names of those team members plus a dedicated email +address for the team to the About page. + + +Status before this proposal +--------------------------- + +Acknowledgement of support +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At the time of writing (Dec 2020), the logos of the four largest financial +sponsors and two institutional partners are displayed on +https://numpy.org/about/. 
The `Nature paper about NumPy <https://www.nature.com/articles/s41586-020-2649-2>`__ +mentions some early funding. No comprehensive list of received funding and +in-kind support is published anywhere. + +Decisions on which logos to list on the website have been made mostly by the +website team. Decisions on which entities to recognize as Institutional Partner +have been made by the NumPy Steering Council. + + +NumPy governance, decision-making, and financial oversight +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*This section is meant as context for the reader, to help put the rest of this +NEP in perspective, and perhaps answer questions the reader has when reading +this as a potential sponsor.* + +NumPy has a formal governance structure defined in +`this governance document <https://numpy.org/devdocs/dev/governance/index.html>`__). +Decisions are made by consensus among all active participants in a discussion +(typically on the mailing list), and if consensus cannot be reached then the +Steering Council takes the decision (also by consensus). + +NumPy is a sponsored project of NumFOCUS, a US-based 501(c)3 nonprofit. +NumFOCUS administers NumPy funds, and ensures they are spent in accordance with +its mission and nonprofit status. In practice, NumPy has a NumFOCUS +subcommittee (with its members named in the NumPy governance document) who can +authorize financial transactions. Those transactions, for example paying a +contractor for a particular activity or deliverable, are decided on by the +NumPy Steering Council. + + +Alternatives +------------ + +*Tiered sponsorship levels.* We considered using tiered sponsorship levels, and +rejected this alternative because it would be more complex, and not necessarily +communicate the right intent - the minimum levels are for us to determine how +to acknowledge support that we receive, not a commercial value proposition. 
+Entities typically will support NumPy because they rely on the project or want +to help advance it, and not to get brand awareness through logo placement. + +*Listing all donations*. Note that in the past we have received many smaller +donations, mostly from individuals through NumFOCUS. It would be great to list +all of those contributions, but given the way we receive information on those +donations right now, that would be quite labor-intensive. If we manage to move +to a more suitable platform, such as `Open Collective <https://opencollective.com/>`__, +in the future, we should reconsider listing all individual donations. + + +Related Work +------------ + +Here we provide a few examples of how other projects handle sponsorship +guidelines and acknowledgements. + +*Scikit-learn* has a narrow banner with logos at the bottom of +https://scikit-learn.org, and a list of present funding and past sponsors at +https://scikit-learn.org/stable/about.html#funding. Plus a separate section +"Infrastructure support" at the bottom of that same About page. + +*Jupyter* has logos of sponsors and institutional partners in two sections on +https://jupyter.org/about. Some subprojects have separate approaches, for +example sponsors are listed (by using the `all-contributors +<https://github.com/all-contributors/all-contributors>`__ bot) in the README for +`jupyterlab-git <https://github.com/jupyterlab/jupyterlab-git>`__. For a recent +discussion on that, see `here <jupyterlab-git acknowledgements discussion>`_. + +*NumFOCUS* has a large banner with sponsor logos on its front page at +https://numfocus.org, and a full page with sponsors at different sponsorship +levels listed at https://numfocus.org/sponsors. 
They also have a +`Corporate Sponsorship Prospectus <https://numfocus.org/blog/introducing-our-newest-corporate-sponsorship-prospectus>`__, +which includes a lot of detail on both sponsorship levels and benefits, as well +as how that helps NumFOCUS-affiliated projects (including NumPy). + + +Discussion +---------- + +Mailing list thread(s) discussing this NEP: TODO + + +References and Footnotes +------------------------ + +- `Inside NumPy: preparing for the next decade <https://github.com/numpy/archive/blob/master/content/inside_numpy_presentation_SciPy2019.pdf>`__ presentation at SciPy'19 discussing the impact of the first NumPy grant. +- `Issue <https://github.com/numpy/numpy/issues/13393>`__ and + `email <https://mail.python.org/pipermail/numpy-discussion/2019-April/079371.html>`__ + where IBM offered a $5,000 bounty for VSX SIMD support +- `JupyterLab Corporate Engagement and Contribution Guide <https://github.com/jupyterlab/jupyterlab/blob/master/CORPORATE.md>`__ + + +.. _jupyterlab-git acknowledgements discussion: https://github.com/jupyterlab/jupyterlab-git/pull/530 + + +Copyright +--------- + +This document has been placed in the public domain. diff --git a/doc_requirements.txt b/doc_requirements.txt index 64ab9a41f..26be985bb 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -4,4 +4,4 @@ ipython scipy matplotlib pandas -pydata-sphinx-theme==0.4.1 +pydata-sphinx-theme==0.4.2 diff --git a/numpy/__init__.py b/numpy/__init__.py index a242bb7df..7dadb6491 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -109,8 +109,9 @@ Exceptions to this rule are documented. import sys import warnings -from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning -from ._globals import _NoValue +from ._globals import ( + ModuleDeprecationWarning, VisibleDeprecationWarning, _NoValue +) # We first need to detect if we're being called as part of the numpy setup # procedure itself in a reliable manner. 
@@ -397,4 +398,3 @@ else: from ._version import get_versions __version__ = get_versions()['version'] del get_versions - diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index dbf807783..3d92a543b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -9,8 +9,16 @@ from numpy.core._internal import _ctypes from numpy.typing import ( # Arrays ArrayLike, + _ArrayND, + _ArrayOrScalar, + _NestedSequence, + _RecursiveSequence, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, # DTypes + DTypeLike, _SupportsDType, _VoidDTypeLike, @@ -127,6 +135,7 @@ from typing import ( Iterable, List, Mapping, + NoReturn, Optional, overload, Sequence, @@ -584,19 +593,19 @@ where: Any who: Any _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) -_DTypeScalar = TypeVar("_DTypeScalar", bound=generic) +_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"] -class dtype(Generic[_DTypeScalar]): +class dtype(Generic[_DTypeScalar_co]): names: Optional[Tuple[str, ...]] # Overload for subclass of generic @overload def __new__( cls, - dtype: Type[_DTypeScalar], + dtype: Type[_DTypeScalar_co], align: bool = ..., copy: bool = ..., - ) -> dtype[_DTypeScalar]: ... + ) -> dtype[_DTypeScalar_co]: ... # Overloads for string aliases, Python types, and some assorted # other special cases. Order is sometimes important because of the # subtype relationships @@ -711,10 +720,10 @@ class dtype(Generic[_DTypeScalar]): @overload def __new__( cls, - dtype: dtype[_DTypeScalar], + dtype: dtype[_DTypeScalar_co], align: bool = ..., copy: bool = ..., - ) -> dtype[_DTypeScalar]: ... + ) -> dtype[_DTypeScalar_co]: ... # TODO: handle _SupportsDType better @overload def __new__( @@ -791,7 +800,7 @@ class dtype(Generic[_DTypeScalar]): @property def str(self) -> builtins.str: ... @property - def type(self) -> Type[_DTypeScalar]: ... + def type(self) -> Type[_DTypeScalar_co]: ... 
class _flagsobj: aligned: bool @@ -1319,6 +1328,7 @@ class _ArrayOrScalarCommon: ) -> _NdArraySubClass: ... _DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) # TODO: Set the `bound` to something more suitable once we # have proper shape support @@ -1327,7 +1337,7 @@ _ShapeType = TypeVar("_ShapeType", bound=Any) _BufferType = Union[ndarray, bytes, bytearray, memoryview] _Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"] -class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property def base(self) -> Optional[ndarray]: ... @property @@ -1352,7 +1362,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): order: _OrderKACF = ..., ) -> _ArraySelf: ... @overload - def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType]: ... + def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType_co]: ... @overload def __array__(self, __dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... @property @@ -1464,10 +1474,77 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): def __iter__(self) -> Any: ... def __contains__(self, key) -> bool: ... def __index__(self) -> int: ... - def __lt__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... - def __le__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... - def __gt__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... - def __ge__(self, other: ArrayLike) -> Union[ndarray, bool_]: ... + + # The last overload is for catching recursive objects whose + # nesting is too deep. + # The first overload is for catching `bytes` (as they are a subtype of + # `Sequence[int]`) and `str`. As `str` is a recursive sequence of + # strings, it will pass through the final overload otherwise + + @overload + def __lt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
+ @overload + def __lt__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __lt__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + + @overload + def __le__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __le__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __le__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + + @overload + def __gt__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __gt__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __gt__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... 
+ @overload + def __gt__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + + @overload + def __ge__(self: _ArrayND[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ge__(self: _ArrayND[Union[number[Any], bool_]], other: _ArrayLikeNumber_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__(self: _ArrayND[Union[bool_, integer[Any], timedelta64]], other: _ArrayLikeTD64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__(self: _ArrayND[datetime64], other: _ArrayLikeDT64_co) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__(self: _ArrayND[object_], other: Any) -> _ArrayOrScalar[bool_]: ... + @overload + def __ge__( + self: _ArrayND[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> _ArrayOrScalar[bool_]: ... + def __matmul__(self, other: ArrayLike) -> Any: ... # NOTE: `ndarray` does not implement `__imatmul__` def __rmatmul__(self, other: ArrayLike) -> Any: ... @@ -1516,7 +1593,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType]): def __ior__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property - def dtype(self) -> _DType: ... + def dtype(self) -> _DType_co: ... 
# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, # the `@abstractmethod` decorator is herein used to (forcefully) deny @@ -1586,10 +1663,10 @@ class number(generic, Generic[_NBit1]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp - __lt__: _ComparisonOp[_NumberLike_co] - __le__: _ComparisonOp[_NumberLike_co] - __gt__: _ComparisonOp[_NumberLike_co] - __ge__: _ComparisonOp[_NumberLike_co] + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] class bool_(generic): def __init__(self, __value: object = ...) -> None: ... @@ -1628,10 +1705,10 @@ class bool_(generic): __rmod__: _BoolMod __divmod__: _BoolDivMod __rdivmod__: _BoolDivMod - __lt__: _ComparisonOp[_NumberLike_co] - __le__: _ComparisonOp[_NumberLike_co] - __gt__: _ComparisonOp[_NumberLike_co] - __ge__: _ComparisonOp[_NumberLike_co] + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] class object_(generic): def __init__(self, __value: object = ...) -> None: ... @@ -1660,10 +1737,10 @@ class datetime64(generic): @overload def __sub__(self, other: _TD64Like_co) -> datetime64: ... def __rsub__(self, other: datetime64) -> timedelta64: ... 
- __lt__: _ComparisonOp[datetime64] - __le__: _ComparisonOp[datetime64] - __gt__: _ComparisonOp[datetime64] - __ge__: _ComparisonOp[datetime64] + __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] # Support for `__index__` was added in python 3.8 (bpo-20092) if sys.version_info >= (3, 8): @@ -1762,10 +1839,10 @@ class timedelta64(generic): def __rmod__(self, other: timedelta64) -> timedelta64: ... def __divmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... def __rdivmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... - __lt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] - __le__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] - __gt__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] - __ge__: _ComparisonOp[Union[timedelta64, _IntLike_co, _BoolLike_co]] + __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] class unsignedinteger(integer[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 2cbfe52be..6073166a0 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -377,7 +377,7 @@ add_newdoc('numpy.core', 'nditer', ... while not it.finished: ... it[0] = lamdaexpr(*it[1:]) ... it.iternext() - ... return it.operands[0] + ... 
return it.operands[0] >>> a = np.arange(5) >>> b = np.ones(5) @@ -821,7 +821,7 @@ add_newdoc('numpy.core.multiarray', 'array', ===== ========= =================================================== When ``copy=False`` and a copy is made for other reasons, the result is - the same as if ``copy=True``, with some exceptions for `A`, see the + the same as if ``copy=True``, with some exceptions for 'A', see the Notes section. The default order is 'K'. subok : bool, optional If True, then sub-classes will be passed-through, otherwise diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 52df1aad9..c8de48ff8 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -319,34 +319,34 @@ def choose(a, choices, out=None, mode='raise'): But this omits some subtleties. Here is a fully general summary: - Given an "index" array (`a`) of integers and a sequence of `n` arrays + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays (`choices`), `a` and each choice array are first broadcast, as necessary, to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. Then, a new array with shape ``Ba.shape`` is created as + for each ``i``. 
Then, a new array with shape ``Ba.shape`` is created as follows: - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) integer; modular arithmetic is used to map integers outside the range `[0, n-1]` back into that range; and then the new array is constructed as above; - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. Parameters ---------- a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. choices : sequence of arrays Choice arrays. `a` and all of the choices must be broadcastable to the same shape. 
If `choices` is itself an array (not recommended), then @@ -355,12 +355,12 @@ def choose(a, choices, out=None, mode='raise'): out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Note that `out` is always - buffered if `mode='raise'`; use other modes for better performance. + buffered if ``mode='raise'``; use other modes for better performance. mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: + Specifies how indices outside ``[0, n-1]`` will be treated: * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` + * 'wrap' : value becomes value mod ``n`` * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 Returns diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index c95c48d71..e776bd43b 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -299,7 +299,7 @@ def full(shape, fill_value, dtype=None, order='C', *, like=None): Fill value. dtype : data-type, optional The desired data-type for the array The default, None, means - `np.array(fill_value).dtype`. + ``np.array(fill_value).dtype``. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. 
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 58571b678..ef105ff2d 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2124,7 +2124,16 @@ PyArray_FromInterface(PyObject *origin) if (iface == NULL) { if (PyErr_Occurred()) { - return NULL; + if (PyErr_ExceptionMatches(PyExc_RecursionError) || + PyErr_ExceptionMatches(PyExc_MemoryError)) { + /* RecursionError and MemoryError are considered fatal */ + return NULL; + } + /* + * This should probably be deprecated, but at least shapely raised + * a NotImplementedError expecting it to be cleared (gh-17965) + */ + PyErr_Clear(); } return Py_NotImplemented; } @@ -2392,7 +2401,13 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__"); if (array_meth == NULL) { if (PyErr_Occurred()) { - return NULL; + if (PyErr_ExceptionMatches(PyExc_RecursionError) || + PyErr_ExceptionMatches(PyExc_MemoryError)) { + /* RecursionError and MemoryError are considered fatal */ + return NULL; + } + /* This should probably be deprecated. 
*/ + PyErr_Clear(); } return Py_NotImplemented; } diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index d1b76de4e..333b8e188 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -20,28 +20,6 @@ #include "simd/simd.h" #include "common.h" -#ifdef NPY_HAVE_SSE_INTRINSICS -#define EINSUM_USE_SSE1 1 -#else -#define EINSUM_USE_SSE1 0 -#endif - -#ifdef NPY_HAVE_SSE2_INTRINSICS -#define EINSUM_USE_SSE2 1 -#else -#define EINSUM_USE_SSE2 0 -#endif - -#if EINSUM_USE_SSE1 -#include <xmmintrin.h> -#endif - -#if EINSUM_USE_SSE2 -#include <emmintrin.h> -#endif - -#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0) - // ARM/Neon don't have instructions for aligned memory access #ifdef NPY_HAVE_NEON #define EINSUM_IS_ALIGNED(x) 0 @@ -311,6 +289,77 @@ finish_after_unrolled_loop: #elif @nop@ == 2 && !@complex@ +// calculate the multiply and add operation such as dataout = data*scalar+dataout +static NPY_GCC_OPT_3 void +@name@_sum_of_products_muladd(@type@ *data, @type@ *data_out, @temptype@ scalar, npy_intp count) +{ +#if @NPYV_CHK@ // NPYV check for @type@ + /* Use aligned instructions if possible */ + const int is_aligned = EINSUM_IS_ALIGNED(data) && EINSUM_IS_ALIGNED(data_out); + const int vstep = npyv_nlanes_@sfx@; + const npyv_@sfx@ v_scalar = npyv_setall_@sfx@(scalar); + /**begin repeat2 + * #cond = if(is_aligned), else# + * #ld = loada, load# + * #st = storea, store# + */ + @cond@ { + const npy_intp vstepx4 = vstep * 4; + for (; count >= vstepx4; count -= vstepx4, data += vstepx4, data_out += vstepx4) { + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data + vstep * @i@); + npyv_@sfx@ c@i@ = npyv_@ld@_@sfx@(data_out + vstep * @i@); + /**end repeat3**/ + /**begin repeat3 + * #i = 0, 1, 2, 3# + */ + npyv_@sfx@ abc@i@ = npyv_muladd_@sfx@(v_scalar, b@i@, c@i@); + /**end repeat3**/ + /**begin repeat3 + * #i = 0, 1, 2, 3# 
+ */ + npyv_@st@_@sfx@(data_out + vstep * @i@, abc@i@); + /**end repeat3**/ + } + } + /**end repeat2**/ + for (; count > 0; count -= vstep, data += vstep, data_out += vstep) { + npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count); + npyv_@sfx@ b = npyv_load_tillz_@sfx@(data_out, count); + npyv_store_till_@sfx@(data_out, count, npyv_muladd_@sfx@(a, v_scalar, b)); + } + npyv_cleanup(); +#else +#ifndef NPY_DISABLE_OPTIMIZATION + for (; count >= 4; count -= 4, data += 4, data_out += 4) { + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ b@i@ = @from@(data[@i@]); + const @type@ c@i@ = @from@(data_out[@i@]); + /**end repeat2**/ + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + const @type@ abc@i@ = scalar * b@i@ + c@i@; + /**end repeat2**/ + /**begin repeat2 + * #i = 0, 1, 2, 3# + */ + data_out[@i@] = @to@(abc@i@); + /**end repeat2**/ + } +#endif // !NPY_DISABLE_OPTIMIZATION + for (; count > 0; --count, ++data, ++data_out) { + const @type@ b = @from@(*data); + const @type@ c = @from@(*data_out); + *data_out = @to@(scalar * b + c); + } +#endif // NPYV check for @type@ +} + static void @name@_sum_of_products_contig_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) @@ -403,242 +452,23 @@ static void @type@ *data1 = (@type@ *)dataptr[1]; @type@ *data_out = (@type@ *)dataptr[2]; -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b, value0_sse; -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, b, value0_sse; -#endif - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n", (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(value0 * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - value0_sse = _mm_set_ps1(value0); - - /* Use aligned instructions if possible */ - if 
(EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } - else { - return; - } - } -#elif EINSUM_USE_SSE2 && @float64@ - value0_sse = _mm_set1_pd(value0); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_load_pd(data_out+@i@)); - _mm_store_pd(data_out+@i@, b); -/**end repeat2**/ - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } - else { - return; - } - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@)); - _mm_storeu_pd(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(value0 * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } + @name@_sum_of_products_muladd(data1, data_out, value0, count); + } static 
void @name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr, npy_intp const *NPY_UNUSED(strides), npy_intp count) { - @type@ *data0 = (@type@ *)dataptr[0]; @temptype@ value1 = @from@(*(@type@ *)dataptr[1]); + @type@ *data0 = (@type@ *)dataptr[0]; @type@ *data_out = (@type@ *)dataptr[2]; -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b, value1_sse; -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, b, value1_sse; -#endif - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n", (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(@from@(data0[@i@])* - value1 + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - value1_sse = _mm_set_ps1(value1); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - value1_sse = _mm_set1_pd(value1); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse); - b = _mm_add_pd(a, _mm_load_pd(data_out+@i@)); - _mm_store_pd(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the 
loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse); - b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@)); - _mm_storeu_pd(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(@from@(data0[@i@])* - value1 + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; + @name@_sum_of_products_muladd(data0, data_out, value1, count); } static NPY_GCC_OPT_3 void diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 95159e1ad..45c792ad2 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -703,17 +703,19 @@ class TestArrayLikes: @pytest.mark.parametrize("attribute", ["__array_interface__", "__array__", "__array_struct__"]) - def test_bad_array_like_attributes(self, attribute): - # Check that errors during attribute retrieval are raised unless - # they are Attribute errors. + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. 
class BadInterface: def __getattr__(self, attr): if attr == attribute: - raise RuntimeError + raise error super().__getattr__(attr) - with pytest.raises(RuntimeError): + with pytest.raises(error): np.array(BadInterface()) @pytest.mark.parametrize("error", [RecursionError, MemoryError]) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 5e6472ae5..94f61baca 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -1365,6 +1365,7 @@ def test_iter_copy(): @pytest.mark.parametrize("dtype", np.typecodes["All"]) @pytest.mark.parametrize("loop_dtype", np.typecodes["All"]) +@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning") def test_iter_copy_casts(dtype, loop_dtype): # Ensure the dtype is never flexible: if loop_dtype.lower() == "m": diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index 4730a5a09..812461538 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -976,7 +976,7 @@ def is_free_format(file): with open(file, encoding='latin1') as f: line = f.readline() n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): + if _has_f_header(line) or _has_fix_header(line): n = 0 elif _has_f90_header(line): n = 0 diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 80b150655..5250fea84 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -257,6 +257,7 @@ def ismodule(rout): def isfunction(rout): return 'block' in rout and 'function' == rout['block'] + def isfunction_wrap(rout): if isintent_c(rout): return 0 @@ -284,6 +285,10 @@ def hasassumedshape(rout): return False +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + def isroutine(rout): return isfunction(rout) or issubroutine(rout) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index d27845796..1149633c0 100755 --- a/numpy/f2py/crackfortran.py +++ 
b/numpy/f2py/crackfortran.py @@ -3113,7 +3113,7 @@ def crack2fortrangen(block, tab='\n', as_interface=False): result = ' result (%s)' % block['result'] if block['result'] not in argsl: argsl.append(block['result']) - body = crack2fortrangen(block['body'], tab + tabchar) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) vars = vars2fortran( block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) mess = '' @@ -3231,8 +3231,13 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): show(vars) outmess('vars2fortran: No definition for argument "%s".\n' % a) continue - if a == block['name'] and not block['block'] == 'function': - continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index e9976f43c..21d4c009c 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -130,7 +130,7 @@ def createfuncwrapper(rout, signature=0): l = l + ', ' + fortranname if need_interface: for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): + if line.lstrip().startswith('use ') and '__user__' not in line: add(line) args = args[1:] @@ -222,7 +222,7 @@ def createsubrwrapper(rout, signature=0): if need_interface: for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): + if line.lstrip().startswith('use ') and '__user__' not in line: add(line) dumped_args = [] @@ -247,7 +247,10 @@ def createsubrwrapper(rout, signature=0): pass else: add('interface') - add(rout['saved_interface'].lstrip()) + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and 
'__user__' in line: + continue + add(line) add('end interface') sargs = ', '.join([a for a in args if a not in extra_args]) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index f1490527e..4e1cf0c7d 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -73,7 +73,7 @@ from .auxfuncs import ( issubroutine, issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, isunsigned_chararray, isunsigned_long_long, isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, + l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper ) from . import capi_maps @@ -1184,9 +1184,12 @@ def buildmodule(m, um): nb1['args'] = a nb_list.append(nb1) for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. + isf90 = requiresf90wrapper(nb) api, wrap = buildapi(nb) if wrap: - if ismoduleroutine(nb): + if isf90: funcwrappers2.append(wrap) else: funcwrappers.append(wrap) @@ -1288,7 +1291,10 @@ def buildmodule(m, um): 'C It contains Fortran 77 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): - if l and l[0] == ' ': + if 0 <= l.find('!') < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': while len(l) >= 66: lines.append(l[:66] + '\n &') l = l[66:] @@ -1310,7 +1316,10 @@ def buildmodule(m, um): '! 
It contains Fortran 90 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): - if len(l) > 72 and l[0] == ' ': + if 0 <= l.find('!') < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': lines.append(l[:72] + '&\n &') l = l[72:] while len(l) > 66: diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 81650a819..37736af21 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -211,3 +211,28 @@ class TestF77CallbackPythonTLS(TestF77Callback): compiler-provided """ options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + + suffix = '.f90' + + code = textwrap.dedent( + """ + function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) + end function gh17797 + """) + + def test_gh17797(self): + + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index aa4499764..534ab683c 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -580,7 +580,7 @@ class TestSaveTxt: memoryerror_raised.value = False try: # The test takes at least 6GB of memory, writes a file larger - # than 4GB + # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile`` test_data = np.asarray([np.random.rand( np.random.randint(50,100),4) for i in range(800000)], dtype=object) @@ -599,6 +599,9 @@ class TestSaveTxt: p.join() if memoryerror_raised.value: raise MemoryError("Child process raised a MemoryError exception") + # -9 indicates a SIGKILL, probably an OOM. 
+ if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") assert p.exitcode == 0 class LoadTxtBase: diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 2b4cbdfbb..960797b68 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -6,11 +6,12 @@ import functools from numpy.core.numeric import ( asanyarray, arange, zeros, greater_equal, multiply, ones, asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal, - nonzero + nonzero, indices ) from numpy.core.overrides import set_array_function_like_doc, set_module from numpy.core import overrides from numpy.core import iinfo +from numpy.lib.stride_tricks import broadcast_to __all__ = [ @@ -894,7 +895,10 @@ def tril_indices(n, k=0, m=None): [-10, -10, -10, -10]]) """ - return nonzero(tri(n, m, k=k, dtype=bool)) + tri_ = tri(n, m, k=k, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) def _trilu_indices_form_dispatcher(arr, k=None): @@ -1010,7 +1014,10 @@ def triu_indices(n, k=0, m=None): [ 12, 13, 14, -1]]) """ - return nonzero(~tri(n, m, k=k-1, dtype=bool)) + tri_ = ~tri(n, m, k=k - 1, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) @array_function_dispatch(_trilu_indices_form_dispatcher) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 7d652ce89..3033a1495 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2,6 +2,7 @@ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 import operator import warnings +from collections.abc import MutableSequence from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer from cpython cimport (Py_INCREF, PyFloat_AsDouble) @@ -4347,14 +4348,14 @@ cdef class Generator: """ shuffle(x, axis=0) - Modify a sequence in-place by shuffling its contents. 
+ Modify an array or sequence in-place by shuffling its contents. The order of sub-arrays is changed but their contents remains the same. Parameters ---------- - x : array_like - The array or list to be shuffled. + x : ndarray or MutableSequence + The array, list or mutable sequence to be shuffled. axis : int, optional The axis which `x` is shuffled along. Default is 0. It is only supported on `ndarray` objects. @@ -4414,7 +4415,11 @@ cdef class Generator: with self.lock, nogil: _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride, x_ptr, buf_ptr) - elif isinstance(x, np.ndarray) and x.ndim and x.size: + elif isinstance(x, np.ndarray): + if x.size == 0: + # shuffling is a no-op + return + x = np.swapaxes(x, 0, axis) buf = np.empty_like(x[0, ...]) with self.lock: @@ -4428,6 +4433,15 @@ cdef class Generator: x[i] = buf else: # Untyped path. + if not isinstance(x, MutableSequence): + # See gh-18206. We may decide to deprecate here in the future. + warnings.warn( + "`x` isn't a recognized object; `shuffle` is not guaranteed " + "to behave correctly. E.g., non-numpy array/tensor objects " + "with view semantics may contain duplicates after shuffling.", + UserWarning, stacklevel=2 + ) + if axis != 0: raise NotImplementedError("Axis argument is only supported " "on ndarray objects") diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d43e7f5aa..814630c03 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -2,6 +2,7 @@ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 import operator import warnings +from collections.abc import MutableSequence import numpy as np @@ -4402,8 +4403,8 @@ cdef class RandomState: Parameters ---------- - x : array_like - The array or list to be shuffled. + x : ndarray or MutableSequence + The array, list or mutable sequence to be shuffled. 
Returns ------- @@ -4456,7 +4457,11 @@ cdef class RandomState: self._shuffle_raw(n, sizeof(np.npy_intp), stride, x_ptr, buf_ptr) else: self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) - elif isinstance(x, np.ndarray) and x.ndim and x.size: + elif isinstance(x, np.ndarray): + if x.size == 0: + # shuffling is a no-op + return + buf = np.empty_like(x[0, ...]) with self.lock: for i in reversed(range(1, n)): @@ -4468,6 +4473,15 @@ cdef class RandomState: x[i] = buf else: # Untyped path. + if not isinstance(x, MutableSequence): + # See gh-18206. We may decide to deprecate here in the future. + warnings.warn( + "`x` isn't a recognized object; `shuffle` is not guaranteed " + "to behave correctly. E.g., non-numpy array/tensor objects " + "with view semantics may contain duplicates after shuffling.", + UserWarning, stacklevel=2 + ) + with self.lock: for i in reversed(range(1, n)): j = random_interval(&self._bitgen, i) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index c4fb5883c..47c81584c 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -960,6 +960,14 @@ class TestRandomDist: random.shuffle(actual, axis=-1) assert_array_equal(actual, desired) + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + def test_shuffle_axis_nonsquare(self): y1 = np.arange(20).reshape(2, 10) y2 = y1.copy() @@ -993,6 +1001,11 @@ class TestRandomDist: arr = [[1, 2, 3], [4, 5, 6]] assert_raises(NotImplementedError, random.shuffle, arr, 1) + arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(np.AxisError, random.shuffle, arr, 2) + def test_permutation(self): random = Generator(MT19937(self.seed)) alist = [1, 2, 3, 4, 
5, 6, 7, 8, 9, 0] @@ -1004,7 +1017,7 @@ class TestRandomDist: arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T actual = random.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) - + bad_x_str = "abcd" assert_raises(np.AxisError, random.permutation, bad_x_str) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index b70a04347..7f5f08050 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -642,7 +642,7 @@ class TestRandomDist: a = np.array([42, 1, 2]) p = [None, None, None] assert_raises(ValueError, random.choice, a, p=p) - + def test_choice_p_non_contiguous(self): p = np.ones(10) / 5 p[1::2] = 3.0 @@ -699,6 +699,10 @@ class TestRandomDist: assert_equal( sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + def test_permutation(self): random.seed(self.seed) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index a1d943235..4ec1f4b2f 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -302,18 +302,23 @@ from ._array_like import ( ArrayLike as ArrayLike, _ArrayLike, _NestedSequence, + _RecursiveSequence, _SupportsArray, + _ArrayND, + _ArrayOrScalar, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, _ArrayLikeVoid_co, _ArrayLikeStr_co, _ArrayLikeBytes_co, + ) if __doc__ is not None: diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 35413393c..133f38800 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -12,6 +12,7 @@ from numpy import ( integer, floating, complexfloating, + number, timedelta64, datetime64, object_, @@ -33,15 +34,17 @@ else: HAVE_PROTOCOL = True _T = TypeVar("_T") 
+_ScalarType = TypeVar("_ScalarType", bound=generic) _DType = TypeVar("_DType", bound="dtype[Any]") +_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]") if TYPE_CHECKING or HAVE_PROTOCOL: # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None`) of the to-be returned array. # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads - class _SupportsArray(Protocol[_DType]): - def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... + class _SupportsArray(Protocol[_DType_co]): + def __array__(self, dtype: None = ...) -> ndarray[Any, _DType_co]: ... else: _SupportsArray = Any @@ -100,6 +103,10 @@ _ArrayLikeComplex_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]", Union[bool, int, float, complex], ] +_ArrayLikeNumber_co = _ArrayLike[ + "dtype[Union[bool_, number[Any]]]", + Union[bool, int, float, complex], +] _ArrayLikeTD64_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], timedelta64]]", Union[bool, int], @@ -116,3 +123,10 @@ _ArrayLikeBytes_co = _ArrayLike[ "dtype[bytes_]", bytes, ] + +if TYPE_CHECKING: + _ArrayND = ndarray[Any, dtype[_ScalarType]] + _ArrayOrScalar = Union[_ScalarType, _ArrayND[_ScalarType]] +else: + _ArrayND = Any + _ArrayOrScalar = Any diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py index e1fdee3ba..1591ca144 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.py @@ -8,6 +8,8 @@ See the `Mypy documentation`_ on protocols for more details. """ +from __future__ import annotations + import sys from typing import ( Union, @@ -21,6 +23,7 @@ from typing import ( from numpy import ( ndarray, + dtype, generic, bool_, timedelta64, @@ -44,7 +47,7 @@ from ._scalars import ( _NumberLike_co, ) from . 
import NBitBase -from ._array_like import ArrayLike +from ._array_like import ArrayLike, _ArrayOrScalar if sys.version_info >= (3, 8): from typing import Protocol @@ -58,8 +61,9 @@ else: HAVE_PROTOCOL = True if TYPE_CHECKING or HAVE_PROTOCOL: - _T = TypeVar("_T") - _2Tuple = Tuple[_T, _T] + _T1 = TypeVar("_T1") + _T2 = TypeVar("_T2") + _2Tuple = Tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -316,11 +320,11 @@ if TYPE_CHECKING or HAVE_PROTOCOL: class _NumberOp(Protocol): def __call__(self, __other: _NumberLike_co) -> Any: ... - class _ComparisonOp(Protocol[_T]): + class _ComparisonOp(Protocol[_T1, _T2]): @overload - def __call__(self, __other: _T) -> bool_: ... + def __call__(self, __other: _T1) -> bool_: ... @overload - def __call__(self, __other: ArrayLike) -> Union[ndarray, bool_]: ... + def __call__(self, __other: _T2) -> _ArrayOrScalar[bool_]: ... else: _BoolOp = Any diff --git a/numpy/typing/tests/data/fail/array_constructors.py b/numpy/typing/tests/data/fail/array_constructors.py index 9cb59fe5f..f13fdacb2 100644 --- a/numpy/typing/tests/data/fail/array_constructors.py +++ b/numpy/typing/tests/data/fail/array_constructors.py @@ -7,10 +7,10 @@ np.require(a, requirements=1) # E: No overload variant np.require(a, requirements="TEST") # E: incompatible type np.zeros("test") # E: incompatible type -np.zeros() # E: Too few arguments +np.zeros() # E: Missing positional argument np.ones("test") # E: incompatible type -np.ones() # E: Too few arguments +np.ones() # E: Missing positional argument np.array(0, float, True) # E: Too many positional diff --git a/numpy/typing/tests/data/fail/comparisons.py b/numpy/typing/tests/data/fail/comparisons.py new file mode 100644 index 000000000..cad1c6555 --- /dev/null +++ b/numpy/typing/tests/data/fail/comparisons.py @@ -0,0 +1,28 @@ +from typing import Any +import numpy as np + +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: 
np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] + +AR_f > AR_m # E: Unsupported operand types +AR_c > AR_m # E: Unsupported operand types + +AR_m > AR_f # E: Unsupported operand types +AR_m > AR_c # E: Unsupported operand types + +AR_i > AR_M # E: Unsupported operand types +AR_f > AR_M # E: Unsupported operand types +AR_m > AR_M # E: Unsupported operand types + +AR_M > AR_i # E: Unsupported operand types +AR_M > AR_f # E: Unsupported operand types +AR_M > AR_m # E: Unsupported operand types + +# Unfortunately `NoReturn` errors are not the most descriptive +_1 = AR_i > str() # E: Need type annotation +_2 = AR_i > bytes() # E: Need type annotation +_3 = str() > AR_M # E: Need type annotation +_4 = bytes() > AR_M # E: Need type annotation diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 35cfbec89..548f76261 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,5 +1,6 @@ [mypy] plugins = numpy.typing.mypy_plugin +show_absolute_path = True [mypy-numpy] ignore_errors = True diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index b298117a6..ce41de435 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +from typing import Any import numpy as np c16 = np.complex128() @@ -20,11 +23,62 @@ c = complex() f = float() i = int() -AR = np.array([0], dtype=np.int64) -AR.setflags(write=False) - SEQ = (0, 1, 2, 3, 4) +AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float_]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex_]] = np.array([1.0j]) +AR_m: np.ndarray[Any, 
np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) + +# Arrays + +AR_b > AR_b +AR_b > AR_u +AR_b > AR_i +AR_b > AR_f +AR_b > AR_c + +AR_u > AR_b +AR_u > AR_u +AR_u > AR_i +AR_u > AR_f +AR_u > AR_c + +AR_i > AR_b +AR_i > AR_u +AR_i > AR_i +AR_i > AR_f +AR_i > AR_c + +AR_f > AR_b +AR_f > AR_u +AR_f > AR_i +AR_f > AR_f +AR_f > AR_c + +AR_c > AR_b +AR_c > AR_u +AR_c > AR_i +AR_c > AR_f +AR_c > AR_c + +AR_m > AR_b +AR_m > AR_u +AR_m > AR_i +AR_b > AR_m +AR_u > AR_m +AR_i > AR_m + +AR_M > AR_M + +AR_O > AR_O +1 > AR_O +AR_O > 1 + # Time structures dt > dt @@ -33,7 +87,7 @@ td > td td > i td > i4 td > i8 -td > AR +td > AR_i td > SEQ # boolean @@ -51,7 +105,7 @@ b_ > f4 b_ > c b_ > c16 b_ > c8 -b_ > AR +b_ > AR_i b_ > SEQ # Complex @@ -67,7 +121,7 @@ c16 > b c16 > c c16 > f c16 > i -c16 > AR +c16 > AR_i c16 > SEQ c16 > c16 @@ -81,7 +135,7 @@ b > c16 c > c16 f > c16 i > c16 -AR > c16 +AR_i > c16 SEQ > c16 c8 > c16 @@ -95,7 +149,7 @@ c8 > b c8 > c c8 > f c8 > i -c8 > AR +c8 > AR_i c8 > SEQ c16 > c8 @@ -109,7 +163,7 @@ b > c8 c > c8 f > c8 i > c8 -AR > c8 +AR_i > c8 SEQ > c8 # Float @@ -123,7 +177,7 @@ f8 > b f8 > c f8 > f f8 > i -f8 > AR +f8 > AR_i f8 > SEQ f8 > f8 @@ -135,7 +189,7 @@ b > f8 c > f8 f > f8 i > f8 -AR > f8 +AR_i > f8 SEQ > f8 f4 > f8 @@ -147,7 +201,7 @@ f4 > b f4 > c f4 > f f4 > i -f4 > AR +f4 > AR_i f4 > SEQ f8 > f4 @@ -159,7 +213,7 @@ b > f4 c > f4 f > f4 i > f4 -AR > f4 +AR_i > f4 SEQ > f4 # Int @@ -173,7 +227,7 @@ i8 > b i8 > c i8 > f i8 > i -i8 > AR +i8 > AR_i i8 > SEQ u8 > u8 @@ -184,7 +238,7 @@ u8 > b u8 > c u8 > f u8 > i -u8 > AR +u8 > AR_i u8 > SEQ i8 > i8 @@ -196,7 +250,7 @@ b > i8 c > i8 f > i8 i > i8 -AR > i8 +AR_i > i8 SEQ > i8 u8 > u8 @@ -207,7 +261,7 @@ b > u8 c > u8 f > u8 i > u8 -AR > u8 +AR_i > u8 SEQ > u8 i4 > i8 @@ -215,7 +269,7 @@ i4 > i4 i4 > i i4 > b_ i4 > b -i4 > AR +i4 > 
AR_i i4 > SEQ u4 > i8 @@ -225,7 +279,7 @@ u4 > u4 u4 > i u4 > b_ u4 > b -u4 > AR +u4 > AR_i u4 > SEQ i8 > i4 @@ -233,7 +287,7 @@ i4 > i4 i > i4 b_ > i4 b > i4 -AR > i4 +AR_i > i4 SEQ > i4 i8 > u4 @@ -243,5 +297,5 @@ u4 > u4 b_ > u4 b > u4 i > u4 -AR > u4 +AR_i > u4 SEQ > u4 diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.py index 507f713c7..5053a9e82 100644 --- a/numpy/typing/tests/data/reveal/comparisons.py +++ b/numpy/typing/tests/data/reveal/comparisons.py @@ -33,8 +33,13 @@ reveal_type(td > td) # E: numpy.bool_ reveal_type(td > i) # E: numpy.bool_ reveal_type(td > i4) # E: numpy.bool_ reveal_type(td > i8) # E: numpy.bool_ -reveal_type(td > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(td > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] + +reveal_type(td > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(td > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(AR > td) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > td) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # boolean @@ -51,8 +56,8 @@ reveal_type(b_ > f4) # E: numpy.bool_ reveal_type(b_ > c) # E: numpy.bool_ reveal_type(b_ > c16) # E: numpy.bool_ reveal_type(b_ > c8) # E: numpy.bool_ -reveal_type(b_ > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(b_ > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(b_ > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(b_ > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # Complex @@ -67,8 +72,8 @@ reveal_type(c16 > b) # E: numpy.bool_ reveal_type(c16 > c) # E: 
numpy.bool_ reveal_type(c16 > f) # E: numpy.bool_ reveal_type(c16 > i) # E: numpy.bool_ -reveal_type(c16 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(c16 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(c16 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(c16 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(c16 > c16) # E: numpy.bool_ reveal_type(f8 > c16) # E: numpy.bool_ @@ -81,8 +86,8 @@ reveal_type(b > c16) # E: numpy.bool_ reveal_type(c > c16) # E: numpy.bool_ reveal_type(f > c16) # E: numpy.bool_ reveal_type(i > c16) # E: numpy.bool_ -reveal_type(AR > c16) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > c16) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > c16) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > c16) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(c8 > c16) # E: numpy.bool_ reveal_type(c8 > f8) # E: numpy.bool_ @@ -95,8 +100,8 @@ reveal_type(c8 > b) # E: numpy.bool_ reveal_type(c8 > c) # E: numpy.bool_ reveal_type(c8 > f) # E: numpy.bool_ reveal_type(c8 > i) # E: numpy.bool_ -reveal_type(c8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(c8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(c8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(c8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(c16 > c8) # E: numpy.bool_ reveal_type(f8 > c8) # E: numpy.bool_ @@ -109,8 +114,8 @@ reveal_type(b > c8) # E: numpy.bool_ reveal_type(c > c8) # E: numpy.bool_ reveal_type(f > c8) # E: numpy.bool_ reveal_type(i > c8) # E: numpy.bool_ -reveal_type(AR > c8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > c8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > c8) # E: 
Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > c8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # Float @@ -123,8 +128,8 @@ reveal_type(f8 > b) # E: numpy.bool_ reveal_type(f8 > c) # E: numpy.bool_ reveal_type(f8 > f) # E: numpy.bool_ reveal_type(f8 > i) # E: numpy.bool_ -reveal_type(f8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(f8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(f8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(f8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(f8 > f8) # E: numpy.bool_ reveal_type(i8 > f8) # E: numpy.bool_ @@ -135,8 +140,8 @@ reveal_type(b > f8) # E: numpy.bool_ reveal_type(c > f8) # E: numpy.bool_ reveal_type(f > f8) # E: numpy.bool_ reveal_type(i > f8) # E: numpy.bool_ -reveal_type(AR > f8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > f8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > f8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > f8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(f4 > f8) # E: numpy.bool_ reveal_type(f4 > i8) # E: numpy.bool_ @@ -147,8 +152,8 @@ reveal_type(f4 > b) # E: numpy.bool_ reveal_type(f4 > c) # E: numpy.bool_ reveal_type(f4 > f) # E: numpy.bool_ reveal_type(f4 > i) # E: numpy.bool_ -reveal_type(f4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(f4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(f4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(f4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(f8 > f4) # E: numpy.bool_ reveal_type(i8 > f4) # E: numpy.bool_ @@ -159,8 +164,8 @@ reveal_type(b > f4) # E: numpy.bool_ reveal_type(c > f4) # E: numpy.bool_ reveal_type(f > f4) # 
E: numpy.bool_ reveal_type(i > f4) # E: numpy.bool_ -reveal_type(AR > f4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > f4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > f4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > f4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] # Int @@ -173,8 +178,8 @@ reveal_type(i8 > b) # E: numpy.bool_ reveal_type(i8 > c) # E: numpy.bool_ reveal_type(i8 > f) # E: numpy.bool_ reveal_type(i8 > i) # E: numpy.bool_ -reveal_type(i8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(i8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(i8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(i8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(u8 > i4) # E: numpy.bool_ @@ -184,8 +189,8 @@ reveal_type(u8 > b) # E: numpy.bool_ reveal_type(u8 > c) # E: numpy.bool_ reveal_type(u8 > f) # E: numpy.bool_ reveal_type(u8 > i) # E: numpy.bool_ -reveal_type(u8 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(u8 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(u8 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(u8 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i8 > i8) # E: numpy.bool_ reveal_type(u8 > i8) # E: numpy.bool_ @@ -196,8 +201,8 @@ reveal_type(b > i8) # E: numpy.bool_ reveal_type(c > i8) # E: numpy.bool_ reveal_type(f > i8) # E: numpy.bool_ reveal_type(i > i8) # E: numpy.bool_ -reveal_type(AR > i8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > i8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > i8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > i8) # E: Union[numpy.bool_, numpy.ndarray[Any, 
numpy.dtype[numpy.bool_]]] reveal_type(u8 > u8) # E: numpy.bool_ reveal_type(i4 > u8) # E: numpy.bool_ @@ -207,16 +212,16 @@ reveal_type(b > u8) # E: numpy.bool_ reveal_type(c > u8) # E: numpy.bool_ reveal_type(f > u8) # E: numpy.bool_ reveal_type(i > u8) # E: numpy.bool_ -reveal_type(AR > u8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > u8) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > u8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > u8) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i4 > i8) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i4 > i) # E: numpy.bool_ reveal_type(i4 > b_) # E: numpy.bool_ reveal_type(i4 > b) # E: numpy.bool_ -reveal_type(i4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(i4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(i4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(i4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(u4 > i8) # E: numpy.bool_ reveal_type(u4 > i4) # E: numpy.bool_ @@ -225,16 +230,16 @@ reveal_type(u4 > u4) # E: numpy.bool_ reveal_type(u4 > i) # E: numpy.bool_ reveal_type(u4 > b_) # E: numpy.bool_ reveal_type(u4 > b) # E: numpy.bool_ -reveal_type(u4 > AR) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(u4 > SEQ) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(u4 > AR) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(u4 > SEQ) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i8 > i4) # E: numpy.bool_ reveal_type(i4 > i4) # E: numpy.bool_ reveal_type(i > i4) # E: numpy.bool_ reveal_type(b_ > i4) # E: numpy.bool_ reveal_type(b > i4) # E: numpy.bool_ -reveal_type(AR > i4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > i4) # E: 
Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > i4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > i4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] reveal_type(i8 > u4) # E: numpy.bool_ reveal_type(i4 > u4) # E: numpy.bool_ @@ -243,5 +248,5 @@ reveal_type(u4 > u4) # E: numpy.bool_ reveal_type(b_ > u4) # E: numpy.bool_ reveal_type(b > u4) # E: numpy.bool_ reveal_type(i > u4) # E: numpy.bool_ -reveal_type(AR > u4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] -reveal_type(SEQ > u4) # E: Union[numpy.ndarray[Any, Any], numpy.bool_] +reveal_type(AR > u4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(SEQ > u4) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 18520a757..324312a92 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -25,15 +25,48 @@ REVEAL_DIR = os.path.join(DATA_DIR, "reveal") MYPY_INI = os.path.join(DATA_DIR, "mypy.ini") CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache") +#: A dictionary with file names as keys and lists of the mypy stdout as values. +#: To be populated by `run_mypy`. +OUTPUT_MYPY: Dict[str, List[str]] = {} + + +def _key_func(key: str) -> str: + """Split at the first occurrence of the ``:`` character. + + Windows drive-letters (*e.g.* ``C:``) are ignored herein. + """ + drive, tail = os.path.splitdrive(key) + return os.path.join(drive, tail.split(":", 1)[0]) + @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -@pytest.fixture(scope="session", autouse=True) -def clear_cache() -> None: - """Clears the mypy cache before running any of the typing tests.""" +@pytest.fixture(scope="module", autouse=True) +def run_mypy() -> None: + """Clears the cache and runs mypy before running any of the typing tests.
+ + The mypy results are cached in `OUTPUT_MYPY` for further use. + + """ if os.path.isdir(CACHE_DIR): shutil.rmtree(CACHE_DIR) + for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR): + # Run mypy + stdout, stderr, _ = api.run([ + "--config-file", + MYPY_INI, + "--cache-dir", + CACHE_DIR, + directory, + ]) + assert not stderr, directory + stdout = stdout.replace('*', '') + + # Parse the output + iterator = itertools.groupby(stdout.split("\n"), key=_key_func) + OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k) + def get_test_cases(directory): for root, _, files in os.walk(directory): @@ -54,15 +87,9 @@ def get_test_cases(directory): @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) def test_success(path): - stdout, stderr, exitcode = api.run([ - "--config-file", - MYPY_INI, - "--cache-dir", - CACHE_DIR, - path, - ]) - assert exitcode == 0, stdout - assert re.match(r"Success: no issues found in \d+ source files?", stdout.strip()) + # Alias `OUTPUT_MYPY` so that it appears in the local namespace + output_mypy = OUTPUT_MYPY + assert path not in output_mypy @pytest.mark.slow @@ -71,29 +98,14 @@ def test_success(path): def test_fail(path): __tracebackhide__ = True - stdout, stderr, exitcode = api.run([ - "--config-file", - MYPY_INI, - "--cache-dir", - CACHE_DIR, - path, - ]) - assert exitcode != 0 - with open(path) as fin: lines = fin.readlines() errors = defaultdict(lambda: "") - error_lines = stdout.rstrip("\n").split("\n") - assert re.match( - r"Found \d+ errors? in \d+ files? 
\(checked \d+ source files?\)", - error_lines[-1].strip(), - ) - for error_line in error_lines[:-1]: - error_line = error_line.strip() - if not error_line: - continue + output_mypy = OUTPUT_MYPY + assert path in output_mypy + for error_line in output_mypy[path]: match = re.match( r"^.+\.py:(?P<lineno>\d+): (error|note): .+$", error_line, @@ -215,23 +227,12 @@ def _parse_reveals(file: IO[str]) -> List[str]: def test_reveal(path): __tracebackhide__ = True - stdout, stderr, exitcode = api.run([ - "--config-file", - MYPY_INI, - "--cache-dir", - CACHE_DIR, - path, - ]) - with open(path) as fin: lines = _parse_reveals(fin) - stdout_list = stdout.replace('*', '').split("\n") - for error_line in stdout_list: - error_line = error_line.strip() - if not error_line: - continue - + output_mypy = OUTPUT_MYPY + assert path in output_mypy + for error_line in output_mypy[path]: match = re.match( r"^.+\.py:(?P<lineno>\d+): note: .+$", error_line, diff --git a/pavement.py b/pavement.py index 373354432..66c2cf953 100644 --- a/pavement.py +++ b/pavement.py @@ -26,6 +26,7 @@ import os import sys import shutil import hashlib +import textwrap # The paver package needs to be installed to run tasks import paver @@ -49,25 +50,13 @@ options(installers=Bunch(releasedir="release", installersdir=os.path.join("release", "installers")),) -#----------------------------- -# Generate the release version -#----------------------------- +#------------------------ +# Get the release version +#------------------------ sys.path.insert(0, os.path.dirname(__file__)) try: - setup_py = __import__("setup") - FULLVERSION = setup_py.VERSION - # This is duplicated from setup.py - if os.path.exists('.git'): - GIT_REVISION = setup_py.git_version() - elif os.path.exists('numpy/version.py'): - # must be a source distribution, use existing version file - from numpy.version import git_revision as GIT_REVISION - else: - GIT_REVISION = "Unknown" - - if not setup_py.ISRELEASED: - FULLVERSION += '.dev0+' + GIT_REVISION[:7] 
+ from setup import FULLVERSION finally: sys.path.pop(0) @@ -210,22 +199,25 @@ def write_release_task(options, filename='README'): with open(notes) as fnotes: freadme.write(fnotes.read()) - freadme.writelines(""" -Checksums -========= + freadme.writelines(textwrap.dedent( + """ + Checksums + ========= -MD5 ---- -:: + MD5 + --- + :: -""") + """)) freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - freadme.writelines(""" -SHA256 ------- -:: -""") + freadme.writelines(textwrap.dedent( + """ + SHA256 + ------ + :: + + """)) freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) # generate md file using pandoc before signing diff --git a/test_requirements.txt b/test_requirements.txt index 52109a5d5..a0956a4b0 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,15 +1,15 @@ cython==0.29.21 wheel<0.36.3 setuptools<49.2.0 -hypothesis==6.0.2 +hypothesis==6.0.3 pytest==6.2.1 pytz==2020.5 -pytest-cov==2.10.1 +pytest-cov==2.11.1 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending cffi # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # - Mypy doesn't currently work on Python 3.9 -mypy==0.790; platform_python_implementation != "PyPy" +mypy==0.800; platform_python_implementation != "PyPy" typing_extensions diff --git a/tools/openblas_support.py b/tools/openblas_support.py index dff19274e..1300795bb 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -13,7 +13,7 @@ from urllib.request import urlopen, Request from urllib.error import HTTPError OPENBLAS_V = '0.3.13' -OPENBLAS_LONG = 'v0.3.13' +OPENBLAS_LONG = 'v0.3.13-62-gaf2b0d02' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' ARCHITECTURES = ['', 'windows', 'darwin', 'aarch64', 'x86_64', |