86 files changed, 1079 insertions, 397 deletions
diff --git a/.appveyor.yml b/.appveyor.yml index 99936267a..f5696e950 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -20,11 +20,6 @@ environment: NPY_NUM_BUILD_JOBS: 4 matrix: - - PYTHON: C:\Python34-x64 - PYTHON_VERSION: 3.4 - PYTHON_ARCH: 64 - TEST_MODE: fast - - PYTHON: C:\Python36 PYTHON_VERSION: 3.6 PYTHON_ARCH: 32 diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..57c98060e --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +NumPy has a Code of Conduct, please see: https://www.numpy.org/devdocs/dev/conduct/code_of_conduct.html diff --git a/.travis.yml b/.travis.yml index 904094d47..7c8ec2ee0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ # After changing this file, check it on: # http://lint.travis-ci.org/ language: python - +group: travis_latest # Run jobs on container-based infrastructure, can be overridden per job sudo: false @@ -32,7 +32,6 @@ env: python: - 2.7 - - 3.4 - 3.5 - 3.6 matrix: @@ -49,7 +48,9 @@ matrix: packages: - dpkg - debootstrap - - python: 3.4 + - python: 3.5 + dist: xenial # Required for python3.5-dbg + sudo: true # travis-ci/travis-ci#9069 env: USE_DEBUG=1 addons: apt: diff --git a/MANIFEST.in b/MANIFEST.in index eff19e20a..e15e0e58a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,7 +1,8 @@ # # Use .add_data_files and .add_data_dir methods in a appropriate # setup.py files to include non-python files such as documentation, -# data, etc files to distribution. Avoid using MANIFEST.in for that. +# data, etc files to distribution (*for installation*). +# Avoid using MANIFEST.in for that. # include MANIFEST.in include pytest.ini @@ -12,21 +13,27 @@ recursive-include numpy/random/mtrand *.pyx *.pxd # Add build support that should go in sdist, but not go in bdist/be installed recursive-include numpy/_build_utils * recursive-include numpy/linalg/lapack_lite *.c *.h -include tox.ini +include runtests.py +include tox.ini pytest.ini .coveragerc +recursive-include tools * # Add sdist files whose use depends on local configuration. 
-include numpy/core/src/multiarray/cblasfuncs.c -include numpy/core/src/multiarray/python_xerbla.c +include numpy/core/src/common/cblasfuncs.c +include numpy/core/src/common/python_xerbla.c # Adding scons build related files not found by distutils recursive-include numpy/core/code_generators *.py *.txt recursive-include numpy/core *.in *.h -# Add documentation: we don't use add_data_dir since we do not want to include -# this at installation, only for sdist-generated tarballs -include doc/Makefile doc/postprocess.py -recursive-include doc/release * -recursive-include doc/source * -recursive-include doc/sphinxext * -recursive-include tools/allocation_tracking * -recursive-include tools/swig * -recursive-include doc/scipy-sphinx-theme * - +# Add documentation and benchmarks: we don't use add_data_dir since we do not +# want to include this at installation, only for sdist-generated tarballs +# Docs: +recursive-include doc * +prune doc/build +prune doc/source/generated +# Benchmarks: +recursive-include benchmarks * +prune benchmarks/env +prune benchmarks/results +prune benchmarks/html +prune benchmarks/numpy +# Exclude generated files +prune */__pycache__ global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~ @@ -20,8 +20,8 @@ It provides: Testing: -- NumPy versions >= 1.15 require ``pytest`` -- NumPy versions < 1.15 require ``nose`` +- NumPy versions ≥ 1.15 require `pytest` +- NumPy versions < 1.15 require `nose` Tests can then be run after installation with: diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py index 9d0f0ae04..b05ea8263 100644 --- a/benchmarks/benchmarks/bench_shape_base.py +++ b/benchmarks/benchmarks/bench_shape_base.py @@ -33,18 +33,6 @@ class Block(Benchmark): self.six = 6 * np.ones(5 * n) self.zero = np.zeros((2 * n, 6 * n)) - self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1 - - self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2 - self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3 - self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4 - - self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5 - self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6 - self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7 - - self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8 - def time_block_simple_row_wise(self, n): np.block([self.a_2d, self.b_2d]) @@ -72,6 +60,29 @@ class Block(Benchmark): [self.zero] ]) + def time_no_lists(self, n): + np.block(1) + np.block(np.eye(3 * n)) + + +class Block3D(Benchmark): + params = [1, 10, 100] + param_names = ['size'] + + def setup(self, n): + # Slow setup method: hence separated from the others above + self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1 + + self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2 + self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3 + self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4 + + self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5 + self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6 + self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7 + + self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8 + def time_3d(self, n): np.block([ [ @@ -84,6 +95,5 @@ class Block(Benchmark): ] ]) - def time_no_lists(self, n): - np.block(1) - np.block(np.eye(3 * n)) + # Retain old benchmark name for backward compat + time_3d.benchmark_name = "bench_shape_base.Block.time_3d" diff --git a/doc/release/1.16.0-notes.rst b/doc/release/1.16.0-notes.rst index 03f4e8479..4a38af777 100644 --- a/doc/release/1.16.0-notes.rst +++ b/doc/release/1.16.0-notes.rst @@ -2,6 +2,15 @@ NumPy 1.16.0 Release Notes 
========================== +This NumPy release is the last one to support Python 2.7. It will be maintained +as a long term release with bug fixes only through 2020. To that end, the +planned code reorganization detailed in NEP-0015 has been made in order to +facilitate backporting fixes from future releases, which will now have the +same code organization. + +Support for Python 3.4 been dropped in this release, the supported Python +versions are 2.7 and 3.5-3.7. The wheels are linked with OpenBLAS v0.3.0 . + Highlights ========== @@ -27,9 +36,12 @@ and not documented. They will be removed in the 1.18 release. Use These were deprecated in 1.10, had no tests, and seem to no longer work in 1.15 anyway. + Future Changes ============== +* NumPy 1.17 will drop support for Python 2.7. + Compatibility notes =================== @@ -100,7 +112,6 @@ Changes Comparison ufuncs will now error rather than return NotImplemented ------------------------------------------------------------------ - Previously, comparison ufuncs such as ``np.equal`` would return `NotImplemented` if their arguments had structured dtypes, to help comparison operators such as ``__eq__`` deal with those. This is no longer needed, as the @@ -115,3 +126,20 @@ raise a ``DeprecationWarning`` if the array is not numerical (i.e., if ``np.positive(array)`` raises a ``TypeError``. For ``ndarray`` subclasses that override the default ``__array_ufunc__`` implementation, the ``TypeError`` is passed on. + +``maximum`` and ``minimum`` set invalid float status for more dtypes +-------------------------------------------------------------------- +Previously only ``float32`` and ``float64`` set invalid float status (by +default emitting a `RuntimeWarning`) when a Nan is encountered in +`numpy.maximum` and `numpy.minimum`. Now ``float16``, ``complex64``, +``complex128`` and ``complex256`` will do so as well. + +Umath and multiarray c-extension modules merged into a single module +-------------------------------------------------------------------- +The two modules were merged, according to the first step in `NEP 15`_. +Previously `np.core.umath` and `np.core.multiarray` were the c-extension +modules, they are now python wrappers to the single `np.core/_multiarray_math` +c-extension module. + +.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html + diff --git a/doc/source/dev/conduct/code_of_conduct.rst b/doc/source/dev/conduct/code_of_conduct.rst new file mode 100644 index 000000000..604f14662 --- /dev/null +++ b/doc/source/dev/conduct/code_of_conduct.rst @@ -0,0 +1,164 @@ +NumPy Code of Conduct +===================== + + +Introduction +------------ + +This code of conduct applies to all spaces managed by the NumPy project, +including all public and private mailing lists, issue trackers, wikis, blogs, +Twitter, and any other communication channel used by our community. The NumPy +project does not organise in-person events, however events related to our +community should have a code of conduct similar in spirit to this one. + +This code of conduct should be honored by everyone who participates in +the NumPy community formally or informally, or claims any affiliation with the +project, in any project-related activities and especially when representing the +project, in any role. + +This code is not exhaustive or complete. It serves to distill our common +understanding of a collaborative, shared environment and goals. 
Please try to +follow this code in spirit as much as in letter, to create a friendly and +productive environment that enriches the surrounding community. + + +Specific Guidelines +------------------- + +We strive to: + +1. Be open. We invite anyone to participate in our community. We prefer to use + public methods of communication for project-related messages, unless + discussing something sensitive. This applies to messages for help or + project-related support, too; not only is a public support request much more + likely to result in an answer to a question, it also ensures that any + inadvertent mistakes in answering are more easily detected and corrected. + +2. Be empathetic, welcoming, friendly, and patient. We work together to resolve + conflict, and assume good intentions. We may all experience some frustration + from time to time, but we do not allow frustration to turn into a personal + attack. A community where people feel uncomfortable or threatened is not a + productive one. + +3. Be collaborative. Our work will be used by other people, and in turn we will + depend on the work of others. When we make something for the benefit of the + project, we are willing to explain to others how it works, so that they can + build on the work to make it even better. Any decision we make will affect + users and colleagues, and we take those consequences seriously when making + decisions. + +4. Be inquisitive. Nobody knows everything! Asking questions early avoids many + problems later, so we encourage questions, although we may direct them to + the appropriate forum. We will try hard to be responsive and helpful. + +5. Be careful in the words that we choose. We are careful and respectful in + our communication and we take responsibility for our own speech. Be kind to + others. Do not insult or put down other participants. We will not accept + harassment or other exclusionary behaviour, such as: + + - Violent threats or language directed against another person. + - Sexist, racist, or otherwise discriminatory jokes and language. + - Posting sexually explicit or violent material. + - Posting (or threatening to post) other people's personally identifying information ("doxing"). + - Sharing private content, such as emails sent privately or non-publicly, + or unlogged forums such as IRC channel history, without the sender's consent. + - Personal insults, especially those using racist or sexist terms. + - Unwelcome sexual attention. + - Excessive profanity. Please avoid swearwords; people differ greatly in their sensitivity to swearing. + - Repeated harassment of others. In general, if someone asks you to stop, then stop. + - Advocating for, or encouraging, any of the above behaviour. + + +Diversity Statement +------------------- + +The NumPy project welcomes and encourages participation by everyone. We are +committed to being a community that everyone enjoys being part of. Although +we may not always be able to accommodate each individual's preferences, we try +our best to treat everyone kindly. + +No matter how you identify yourself or how others perceive you: we welcome you. +Though no list can hope to be comprehensive, we explicitly honour diversity in: +age, culture, ethnicity, genotype, gender identity or expression, language, +national origin, neurotype, phenotype, political beliefs, profession, race, +religion, sexual orientation, socioeconomic status, subculture and technical +ability, to the extent that these do not conflict with this code of conduct. 
+ + +Though we welcome people fluent in all languages, NumPy development is +conducted in English. + +Standards for behaviour in the NumPy community are detailed in the Code of +Conduct above. Participants in our community should uphold these standards +in all their interactions and help others to do so as well (see next section). + + +Reporting Guidelines +-------------------- + +We know that it is painfully common for internet communication to start at or +devolve into obvious and flagrant abuse. We also recognize that sometimes +people may have a bad day, or be unaware of some of the guidelines in this Code +of Conduct. Please keep this in mind when deciding on how to respond to a +breach of this Code. + +For clearly intentional breaches, report those to the Code of Conduct committee +(see below). For possibly unintentional breaches, you may reply to the person +and point out this code of conduct (either in public or in private, whatever is +most appropriate). If you would prefer not to do that, please feel free to +report to the Code of Conduct Committee directly, or ask the Committee for +advice, in confidence. + +You can report issues to the NumPy Code of Conduct committee, at +numpy-conduct@googlegroups.com. Currently, the committee consists of: + +- Stefan van der Walt +- Nathaniel J. Smith +- Ralf Gommers + +If your report involves any members of the committee, or if they feel they have +a conflict of interest in handling it, then they will recuse themselves from +considering your report. Alternatively, if for any reason you feel +uncomfortable making a report to the committee, then you can also contact: + +- NumFOCUS Executive Director: Leah Silen +- NumFOCUS President: Andy Terrel + + +Incident reporting resolution & Code of Conduct enforcement +----------------------------------------------------------- + +*This section summarizes the most important points, more details can be found +in* :ref:`CoC_reporting_manual`. + +We will investigate and respond to all complaints. The NumPy Code of Conduct +Committee and the NumPy Steering Committee (if involved) will protect the +identity of the reporter, and treat the content of complaints as confidential +(unless the reporter agrees otherwise). + +In case of severe and obvious breaches, e.g. personal threat or violent, sexist +or racist language, we will immediately disconnect the originator from NumPy +communication channels; please see the manual for details. + +In cases not involving clear severe and obvious breaches of this code of +conduct, the process for acting on any received code of conduct violation +report will be: + +1. acknowledge report is received +2. reasonable discussion/feedback +3. mediation (if feedback didn't help, and only if both reporter and reportee agree to this) +4. enforcement via transparent decision (see :ref:`CoC_resolutions`) by the + Code of Conduct Committee + +The committee will respond to any report as soon as possible, and at most +within 72 hours. + + +Endnotes +-------- + +We are thankful to the groups behind the following documents, from which we +drew content and inspiration: + +- `The SciPy Code of Conduct <https://docs.scipy.org/doc/scipy/reference/dev/conduct/code_of_conduct.html>`_ + diff --git a/doc/source/dev/conduct/report_handling_manual.rst b/doc/source/dev/conduct/report_handling_manual.rst new file mode 100644 index 000000000..5f5e32f13 --- /dev/null +++ b/doc/source/dev/conduct/report_handling_manual.rst @@ -0,0 +1,218 @@ +.. 
_CoC_reporting_manual: + +NumPy Code of Conduct - How to follow up on a report +---------------------------------------------------- + +This is the manual followed by NumPy's Code of Conduct Committee. It's used +when we respond to an issue to make sure we're consistent and fair. + +Enforcing the Code of Conduct impacts our community today and for the future. +It's an action that we do not take lightly. When reviewing enforcement +measures, the Code of Conduct Committee will keep the following values and +guidelines in mind: + +* Act in a personal manner rather than impersonal. The Committee can engage + the parties to understand the situation, while respecting the privacy and any + necessary confidentiality of reporters. However, sometimes it is necessary + to communicate with one or more individuals directly: the Committee's goal is + to improve the health of our community rather than only produce a formal + decision. + +* Emphasize empathy for individuals rather than judging behavior, avoiding + binary labels of "good" and "bad/evil". Overt, clear-cut aggression and + harassment exists and we will be address that firmly. But many scenarios + that can prove challenging to resolve are those where normal disagreements + devolve into unhelpful or harmful behavior from multiple parties. + Understanding the full context and finding a path that re-engages all is + hard, but ultimately the most productive for our community. + +* We understand that email is a difficult medium and can be isolating. + Receiving criticism over email, without personal contact, can be + particularly painful. This makes it especially important to keep an + atmosphere of open-minded respect of the views of others. It also means + that we must be transparent in our actions, and that we will do everything + in our power to make sure that all our members are treated fairly and with + sympathy. + +* Discrimination can be subtle and it can be unconscious. It can show itself + as unfairness and hostility in otherwise ordinary interactions. We know + that this does occur, and we will take care to look out for it. We would + very much like to hear from you if you feel you have been treated unfairly, + and we will use these procedures to make sure that your complaint is heard + and addressed. + +* Help increase engagement in good discussion practice: try to identify where + discussion may have broken down and provide actionable information, pointers + and resources that can lead to positive change on these points. + +* Be mindful of the needs of new members: provide them with explicit support + and consideration, with the aim of increasing participation from + underrepresented groups in particular. + +* Individuals come from different cultural backgrounds and native languages. + Try to identify any honest misunderstandings caused by a non-native speaker + and help them understand the issue and what they can change to avoid causing + offence. Complex discussion in a foreign language can be very intimidating, + and we want to grow our diversity also across nationalities and cultures. + +*Mediation*: voluntary, informal mediation is a tool at our disposal. In +contexts such as when two or more parties have all escalated to the point of +inappropriate behavior (something sadly common in human conflict), it may be +useful to facilitate a mediation process. 
This is only an example: the +Committee can consider mediation in any case, mindful that the process is meant +to be strictly voluntary and no party can be pressured to participate. If the +Committee suggests mediation, it should: + +* Find a candidate who can serve as a mediator. +* Obtain the agreement of the reporter(s). The reporter(s) have complete + freedom to decline the mediation idea, or to propose an alternate mediator. +* Obtain the agreement of the reported person(s). +* Settle on the mediator: while parties can propose a different mediator than + the suggested candidate, only if common agreement is reached on all terms can + the process move forward. +* Establish a timeline for mediation to complete, ideally within two weeks. + +The mediator will engage with all the parties and seek a resolution that is +satisfactory to all. Upon completion, the mediator will provide a report +(vetted by all parties to the process) to the Committee, with recommendations +on further steps. The Committee will then evaluate these results (whether +satisfactory resolution was achieved or not) and decide on any additional +action deemed necessary. + + +How the committee will respond to reports +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When the committee (or a committee member) receives a report, they will first +determine whether the report is about a clear and severe breach (as defined +below). If so, immediate action needs to be taken in addition to the regular +report handling process. + +Clear and severe breach actions ++++++++++++++++++++++++++++++++ + +We know that it is painfully common for internet communication to start at or +devolve into obvious and flagrant abuse. We will deal quickly with clear and +severe breaches like personal threats, violent, sexist or racist language. + +When a member of the Code of Conduct committee becomes aware of a clear and +severe breach, they will do the following: + +* Immediately disconnect the originator from all NumPy communication channels. +* Reply to the reporter that their report has been received and that the + originator has been disconnected. +* In every case, the moderator should make a reasonable effort to contact the + originator, and tell them specifically how their language or actions + qualify as a "clear and severe breach". The moderator should also say + that, if the originator believes this is unfair or they want to be + reconnected to NumPy, they have the right to ask for a review, as below, by + the Code of Conduct Committee. + The moderator should copy this explanation to the Code of Conduct Committee. +* The Code of Conduct Committee will formally review and sign off on all cases + where this mechanism has been applied to make sure it is not being used to + control ordinary heated disagreement. + +Report handling ++++++++++++++++ + +When a report is sent to the committee they will immediately reply to the +reporter to confirm receipt. This reply must be sent within 72 hours, and the +group should strive to respond much quicker than that. + +If a report doesn't contain enough information, the committee will obtain all +relevant data before acting. The committee is empowered to act on the Steering +Council’s behalf in contacting any individuals involved to get a more complete +account of events. + +The committee will then review the incident and determine, to the best of their +ability: + +* What happened. +* Whether this event constitutes a Code of Conduct violation. +* Who are the responsible party(ies). 
+* Whether this is an ongoing situation, and there is a threat to anyone's + physical safety. + +This information will be collected in writing, and whenever possible the +group's deliberations will be recorded and retained (i.e. chat transcripts, +email discussions, recorded conference calls, summaries of voice conversations, +etc). + +It is important to retain an archive of all activities of this committee to +ensure consistency in behavior and provide institutional memory for the +project. To assist in this, the default channel of discussion for this +committee will be a private mailing list accessible to current and future +members of the committee as well as members of the Steering Council upon +justified request. If the Committee finds the need to use off-list +communications (e.g. phone calls for early/rapid response), it should in all +cases summarize these back to the list so there's a good record of the process. + +The Code of Conduct Committee should aim to have a resolution agreed upon within +two weeks. In the event that a resolution can't be determined in that time, the +committee will respond to the reporter(s) with an update and projected timeline +for resolution. + + +.. _CoC_resolutions: + +Resolutions +~~~~~~~~~~~ + +The committee must agree on a resolution by consensus. If the group cannot reach +consensus and deadlocks for over a week, the group will turn the matter over to +the Steering Council for resolution. + + +Possible responses may include: + +* Taking no further action + + - if we determine no violations have occurred. + - if the matter has been resolved publicly while the committee was considering responses. + +* Coordinating voluntary mediation: if all involved parties agree, the + Committee may facilitate a mediation process as detailed above. +* Remind publicly, and point out that some behavior/actions/language have been + judged inappropriate and why in the current context, or can but hurtful to + some people, requesting the community to self-adjust. +* A private reprimand from the committee to the individual(s) involved. In this + case, the group chair will deliver that reprimand to the individual(s) over + email, cc'ing the group. +* A public reprimand. In this case, the committee chair will deliver that + reprimand in the same venue that the violation occurred, within the limits of + practicality. E.g., the original mailing list for an email violation, but + for a chat room discussion where the person/context may be gone, they can be + reached by other means. The group may choose to publish this message + elsewhere for documentation purposes. +* A request for a public or private apology, assuming the reporter agrees to + this idea: they may at their discretion refuse further contact with the + violator. The chair will deliver this request. The committee may, if it + chooses, attach "strings" to this request: for example, the group may ask a + violator to apologize in order to retain one’s membership on a mailing list. +* A "mutually agreed upon hiatus" where the committee asks the individual to + temporarily refrain from community participation. If the individual chooses + not to take a temporary break voluntarily, the committee may issue a + "mandatory cooling off period". +* A permanent or temporary ban from some or all NumPy spaces (mailing lists, + gitter.im, etc.). The group will maintain records of all such bans so that + they may be reviewed in the future or otherwise maintained. 
+ +Once a resolution is agreed upon, but before it is enacted, the committee will +contact the original reporter and any other affected parties and explain the +proposed resolution. The committee will ask if this resolution is acceptable, +and must note feedback for the record. + +Finally, the committee will make a report to the NumPy Steering Council (as +well as the NumPy core team in the event of an ongoing resolution, such as a +ban). + +The committee will never publicly discuss the issue; all public statements will +be made by the chair of the Code of Conduct Committee or the NumPy Steering +Council. + + +Conflicts of Interest +~~~~~~~~~~~~~~~~~~~~~ + +In the event of any conflict of interest, a committee member must immediately +notify the other members, and recuse themselves if necessary. diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 04c84eb61..9ce04cc1b 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -5,6 +5,7 @@ Contributing to NumPy .. toctree:: :maxdepth: 3 + conduct/code_of_conduct gitwash/index development_environment releasing diff --git a/doc/source/f2py/compile_session.dat b/doc/source/f2py/compile_session.dat index 0d8408198..5c42742be 100644 --- a/doc/source/f2py/compile_session.dat +++ b/doc/source/f2py/compile_session.dat @@ -1,10 +1,10 @@ ->>> import f2py2e +>>> import numpy.f2py >>> fsource = ''' ... subroutine foo ... print*, "Hello world!" ... end ... ''' ->>> f2py2e.compile(fsource,modulename='hello',verbose=0) +>>> numpy.f2py.compile(fsource, modulename='hello', verbose=0) 0 >>> import hello >>> hello.foo() diff --git a/doc/source/f2py/getting-started.rst b/doc/source/f2py/getting-started.rst index fffd61c45..3d8ea24e4 100644 --- a/doc/source/f2py/getting-started.rst +++ b/doc/source/f2py/getting-started.rst @@ -45,9 +45,9 @@ to run :: - f2py -c fib1.f -m fib1 + python -m numpy.f2py -c fib1.f -m fib1 -This command builds (see ``-c`` flag, execute ``f2py`` without +This command builds (see ``-c`` flag, execute ``python -m numpy.f2py`` without arguments to see the explanation of command line options) an extension module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: @@ -162,7 +162,7 @@ one. :: - f2py fib1.f -m fib2 -h fib1.pyf + python -m numpy.f2py fib1.f -m fib2 -h fib1.pyf The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and its contents is shown below. @@ -188,7 +188,7 @@ one. :: - f2py -c fib2.pyf fib1.f + python -m numpy.f2py -c fib2.pyf fib1.f In Python:: @@ -243,7 +243,7 @@ __ fib3.f Building the extension module can be now carried out in one command:: - f2py -c -m fib3 fib3.f + python -m numpy.f2py -c -m fib3 fib3.f Notice that the resulting wrapper to ``FIB`` is as "smart" as in previous case:: diff --git a/doc/source/f2py/run_main_session.dat b/doc/source/f2py/run_main_session.dat index 29ecc3dfe..b9a7e1b0d 100644 --- a/doc/source/f2py/run_main_session.dat +++ b/doc/source/f2py/run_main_session.dat @@ -1,14 +1,14 @@ ->>> import f2py2e ->>> r=f2py2e.run_main(['-m','scalar','docs/usersguide/scalar.f']) +>>> import numpy.f2py +>>> r = numpy.f2py.run_main(['-m','scalar','doc/source/f2py/scalar.f']) Reading fortran codes... - Reading file 'docs/usersguide/scalar.f' + Reading file 'doc/source/f2py/scalar.f' (format:fix,strict) Post-processing... Block: scalar Block: FOO Building modules... Building module "scalar"... 
Wrote C/API module "scalar" to file "./scalarmodule.c" ->>> print r -{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py2e/src/fortranobject.h'], +>>> printr(r) +{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py/src/fortranobject.h'], 'csrc': ['./scalarmodule.c', - '/home/users/pearu/src_cvs/f2py2e/src/fortranobject.c']}} + '/home/users/pearu/src_cvs/f2py/src/fortranobject.c']}} diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index a6f093154..0f5068e0e 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -3,7 +3,19 @@ Using F2PY =========== F2PY can be used either as a command line tool ``f2py`` or as a Python -module ``f2py2e``. +module ``numpy.f2py``. While we try to install the command line tool as part +of the numpy setup, some platforms like Windows make it difficult to +reliably put the executable on the ``PATH``. We will refer to ``f2py`` +in this document but you may have to run it as a module + +``` +python -m numpy.f2py +``` + +If you run ``f2py`` with no arguments, and the line ``numpy Version`` at the +end matches the NumPy version printed from ``python -m numpy.f2py``, then you +can use the shorter version. If not, or if you cannot run ``f2py``, you should +replace all calls to ``f2py`` here with the longer version. Command ``f2py`` ================= @@ -194,15 +206,15 @@ Other options: Execute ``f2py`` without any options to get an up-to-date list of available options. -Python module ``f2py2e`` -========================= +Python module ``numpy.f2py`` +============================ .. warning:: - The current Python interface to ``f2py2e`` module is not mature and - may change in future depending on users needs. + The current Python interface to the ``f2py`` module is not mature and + may change in the future. 
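A minimal sketch of the module-based invocation described in the usage notes above (not part of this patch): it mirrors the ``compile_session.dat`` example, assumes a working Fortran compiler is on the system, and uses the hypothetical module name ``hello``.

```python
# Programmatic equivalent of `python -m numpy.f2py -c ... -m hello`
import numpy.f2py

fsource = '''
      subroutine foo
      print*, "Hello world!"
      end
'''
# compile() returns 0 on success (as shown in compile_session.dat)
failure = numpy.f2py.compile(fsource, modulename='hello', verbose=0)
if failure == 0:
    import hello   # extension module built in the current directory
    hello.foo()
```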
-The following functions are provided by the ``f2py2e`` module: +The following functions are provided by the ``numpy.f2py`` module: ``run_main(<list>)`` Equivalent to running:: diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index b4aeaa277..7f2541667 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -46,11 +46,11 @@ static int _import_array(void) { int st; - PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); PyObject *c_api = NULL; if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); + PyErr_SetString(PyExc_ImportError, "numpy.core._multiarray_umath failed to import"); return -1; } c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); diff --git a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py index 3bcf137f7..1b0143e88 100644 --- a/numpy/core/code_generators/generate_ufunc_api.py +++ b/numpy/core/code_generators/generate_ufunc_api.py @@ -36,11 +36,12 @@ static void **PyUFunc_API=NULL; static NPY_INLINE int _import_umath(void) { - PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); PyObject *c_api = NULL; if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); + PyErr_SetString(PyExc_ImportError, + "numpy.core._multiarray_umath failed to import"); return -1; } c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 632bcb41f..6dc01877b 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -1080,6 +1080,8 @@ def make_code(funcdict, filename): Please make changes to the code generator program (%s) **/ #include "cpuid.h" + #include "ufunc_object.h" + #include "ufunc_type_resolution.h" %s static int diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py new file mode 100644 index 000000000..673328397 --- /dev/null +++ b/numpy/core/multiarray.py @@ -0,0 +1,37 @@ +""" +Create the numpy.core.multiarray namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. + +""" + +from . 
import _multiarray_umath +from numpy.core._multiarray_umath import * +from numpy.core._multiarray_umath import ( + _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string, + _ARRAY_API, _monotonicity + ) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose', + '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity', + 'add_docstring', 'arange', 'array', 'bincount', 'broadcast', + 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner', + 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort', + 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', + 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits', + 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', + 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops', + 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt', + 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', + 'where', 'zeros'] + diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index e5570791a..1b4818b76 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2850,16 +2850,11 @@ class errstate(object): Notes ----- - The ``with`` statement was introduced in Python 2.5, and can only be used - there by importing it: ``from __future__ import with_statement``. In - earlier Python versions the ``with`` statement is not available. - For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- - >>> from __future__ import with_statement # use 'with' in Python 2.5 >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. 
@@ -2919,10 +2914,7 @@ True_ = bool_(True) def extend_all(module): existing = set(__all__) - try: - mall = getattr(module, '__all__') - except AttributeError: - mall = [k for k in module.__dict__.keys() if not k.startswith('_')] + mall = getattr(module, '__all__') for a in mall: if a not in existing: __all__.append(a) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index f826b278f..1588a2634 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -514,9 +514,9 @@ def configuration(parent_package='',top_path=None): def generate_numpyconfig_h(ext, build_dir): """Depends on config.h: generate_config_h has to be called before !""" - # put private include directory in build_dir on search path + # put common include directory in build_dir on search path # allows using code generation in headers headers - config.add_include_dirs(join(build_dir, "src", "private")) + config.add_include_dirs(join(build_dir, "src", "common")) config.add_include_dirs(join(build_dir, "src", "npymath")) target = join(build_dir, header_dir, '_numpyconfig.h') @@ -603,7 +603,7 @@ def configuration(parent_package='',top_path=None): generate_numpy_api = generate_api_func('generate_numpy_api') generate_ufunc_api = generate_api_func('generate_ufunc_api') - config.add_include_dirs(join(local_dir, "src", "private")) + config.add_include_dirs(join(local_dir, "src", "common")) config.add_include_dirs(join(local_dir, "src")) config.add_include_dirs(join(local_dir)) @@ -700,9 +700,9 @@ def configuration(parent_package='',top_path=None): npysort_sources = [join('src', 'npysort', 'quicksort.c.src'), join('src', 'npysort', 'mergesort.c.src'), join('src', 'npysort', 'heapsort.c.src'), - join('src', 'private', 'npy_partition.h.src'), + join('src', 'common', 'npy_partition.h.src'), join('src', 'npysort', 'selection.c.src'), - join('src', 'private', 'npy_binsearch.h.src'), + join('src', 'common', 'npy_binsearch.h.src'), join('src', 'npysort', 'binsearch.c.src'), ] config.add_library('npysort', @@ -710,16 +710,66 @@ def configuration(parent_package='',top_path=None): include_dirs=[]) ####################################################################### - # multiarray module # + # multiarray_tests module # + ####################################################################### + + config.add_extension('_multiarray_tests', + sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), + join('src', 'common', 'mem_overlap.c')], + depends=[join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_extint128.h')], + libraries=['npymath']) + + ####################################################################### + # _multiarray_umath module - common part # + ####################################################################### + + common_deps = [ + join('src', 'common', 'array_assign.h'), + join('src', 'common', 'binop_override.h'), + join('src', 'common', 'cblasfuncs.h'), + join('src', 'common', 'lowlevel_strided_loops.h'), + join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_config.h'), + join('src', 'common', 'npy_extint128.h'), + join('src', 'common', 'npy_longdouble.h'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.h'), + join('src', 'common', 'ufunc_override.h'), + join('src', 'common', 'umathmodule.h'), + ] + + common_src = [ + join('src', 'common', 'array_assign.c'), + join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_longdouble.c'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.c'), + join('src', 
'common', 'ufunc_override.c'), + ] + + blas_info = get_info('blas_opt', 0) + if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []): + extra_info = blas_info + # These files are also in MANIFEST.in so that they are always in + # the source distribution independently of HAVE_CBLAS. + common_src.extend([join('src', 'common', 'cblasfuncs.c'), + join('src', 'common', 'python_xerbla.c'), + ]) + if uses_accelerate_framework(blas_info): + common_src.extend(get_sgemv_fix()) + else: + extra_info = {} + + ####################################################################### + # _multiarray_umath module - multiarray part # ####################################################################### multiarray_deps = [ join('src', 'multiarray', 'arrayobject.h'), join('src', 'multiarray', 'arraytypes.h'), - join('src', 'multiarray', 'array_assign.h'), join('src', 'multiarray', 'buffer.h'), join('src', 'multiarray', 'calculation.h'), - join('src', 'multiarray', 'cblasfuncs.h'), join('src', 'multiarray', 'common.h'), join('src', 'multiarray', 'convert_datatype.h'), join('src', 'multiarray', 'convert.h'), @@ -742,17 +792,8 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'shape.h'), join('src', 'multiarray', 'strfuncs.h'), join('src', 'multiarray', 'typeinfo.h'), - join('src', 'multiarray', 'ucsnarrow.h'), join('src', 'multiarray', 'usertypes.h'), join('src', 'multiarray', 'vdot.h'), - join('src', 'private', 'npy_config.h'), - join('src', 'private', 'templ_common.h.src'), - join('src', 'private', 'lowlevel_strided_loops.h'), - join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_longdouble.h'), - join('src', 'private', 'ufunc_override.h'), - join('src', 'private', 'binop_override.h'), - join('src', 'private', 'npy_extint128.h'), join('include', 'numpy', 'arrayobject.h'), join('include', 'numpy', '_neighborhood_iterator_imp.h'), join('include', 'numpy', 'npy_endian.h'), @@ -778,7 +819,6 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'alloc.c'), join('src', 'multiarray', 'arrayobject.c'), join('src', 'multiarray', 'arraytypes.c.src'), - join('src', 'multiarray', 'array_assign.c'), join('src', 'multiarray', 'array_assign_scalar.c'), join('src', 'multiarray', 'array_assign_array.c'), join('src', 'multiarray', 'buffer.c'), @@ -821,40 +861,11 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'temp_elide.c'), join('src', 'multiarray', 'typeinfo.c'), join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'ucsnarrow.c'), join('src', 'multiarray', 'vdot.c'), - join('src', 'private', 'templ_common.h.src'), - join('src', 'private', 'mem_overlap.c'), - join('src', 'private', 'npy_longdouble.c'), - join('src', 'private', 'ufunc_override.c'), ] - blas_info = get_info('blas_opt', 0) - if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []): - extra_info = blas_info - # These files are also in MANIFEST.in so that they are always in - # the source distribution independently of HAVE_CBLAS. 
- multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'), - join('src', 'multiarray', 'python_xerbla.c'), - ]) - if uses_accelerate_framework(blas_info): - multiarray_src.extend(get_sgemv_fix()) - else: - extra_info = {} - - config.add_extension('multiarray', - sources=multiarray_src + - [generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir, 'generate_numpy_api.py'), - join('*.py')], - depends=deps + multiarray_deps, - libraries=['npymath', 'npysort'], - extra_info=extra_info) - ####################################################################### - # umath module # + # _multiarray_umath module - umath part # ####################################################################### def generate_umath_c(ext, build_dir): @@ -883,34 +894,34 @@ def configuration(parent_package='',top_path=None): join('src', 'umath', 'scalarmath.c.src'), join('src', 'umath', 'ufunc_type_resolution.c'), join('src', 'umath', 'override.c'), - join('src', 'private', 'mem_overlap.c'), - join('src', 'private', 'npy_longdouble.c'), - join('src', 'private', 'ufunc_override.c')] + ] umath_deps = [ generate_umath_py, join('include', 'numpy', 'npy_math.h'), join('include', 'numpy', 'halffloat.h'), join('src', 'multiarray', 'common.h'), - join('src', 'private', 'templ_common.h.src'), + join('src', 'common', 'templ_common.h.src'), join('src', 'umath', 'simd.inc.src'), join('src', 'umath', 'override.h'), join(codegen_dir, 'generate_ufunc_api.py'), - join('src', 'private', 'lowlevel_strided_loops.h'), - join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_longdouble.h'), - join('src', 'private', 'ufunc_override.h'), - join('src', 'private', 'binop_override.h')] + npymath_sources - - config.add_extension('umath', - sources=umath_src + + ] + + config.add_extension('_multiarray_umath', + sources=multiarray_src + umath_src + + npymath_sources + common_src + [generate_config_h, - generate_numpyconfig_h, - generate_umath_c, - generate_ufunc_api], - depends=deps + umath_deps, - libraries=['npymath'], - ) + generate_numpyconfig_h, + generate_numpy_api, + join(codegen_dir, 'generate_numpy_api.py'), + join('*.py'), + generate_umath_c, + generate_ufunc_api, + ], + depends=deps + multiarray_deps + umath_deps + + common_deps, + libraries=['npymath', 'npysort'], + extra_info=extra_info) ####################################################################### # umath_tests module # @@ -933,16 +944,6 @@ def configuration(parent_package='',top_path=None): config.add_extension('_struct_ufunc_tests', sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')]) - ####################################################################### - # multiarray_tests module # - ####################################################################### - - config.add_extension('_multiarray_tests', - sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), - join('src', 'private', 'mem_overlap.c')], - depends=[join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_extint128.h')], - libraries=['npymath']) ####################################################################### # operand_flag_tests module # diff --git a/numpy/core/src/multiarray/array_assign.c b/numpy/core/src/common/array_assign.c index a48e245d8..a48e245d8 100644 --- a/numpy/core/src/multiarray/array_assign.c +++ b/numpy/core/src/common/array_assign.c diff --git a/numpy/core/src/multiarray/array_assign.h b/numpy/core/src/common/array_assign.h index 3fecff007..3fecff007 100644 --- 
a/numpy/core/src/multiarray/array_assign.h +++ b/numpy/core/src/common/array_assign.h diff --git a/numpy/core/src/private/binop_override.h b/numpy/core/src/common/binop_override.h index 47df63e38..47df63e38 100644 --- a/numpy/core/src/private/binop_override.h +++ b/numpy/core/src/common/binop_override.h diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c index 6460c5db1..6460c5db1 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/common/cblasfuncs.c diff --git a/numpy/core/src/multiarray/cblasfuncs.h b/numpy/core/src/common/cblasfuncs.h index 66ce4ca5b..66ce4ca5b 100644 --- a/numpy/core/src/multiarray/cblasfuncs.h +++ b/numpy/core/src/common/cblasfuncs.h diff --git a/numpy/core/src/private/get_attr_string.h b/numpy/core/src/common/get_attr_string.h index bec87c5ed..bec87c5ed 100644 --- a/numpy/core/src/private/get_attr_string.h +++ b/numpy/core/src/common/get_attr_string.h diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h index f9c671f77..f9c671f77 100644 --- a/numpy/core/src/private/lowlevel_strided_loops.h +++ b/numpy/core/src/common/lowlevel_strided_loops.h diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/common/mem_overlap.c index 21db1893b..21db1893b 100644 --- a/numpy/core/src/private/mem_overlap.c +++ b/numpy/core/src/common/mem_overlap.c diff --git a/numpy/core/src/private/mem_overlap.h b/numpy/core/src/common/mem_overlap.h index 8044f1663..8044f1663 100644 --- a/numpy/core/src/private/mem_overlap.h +++ b/numpy/core/src/common/mem_overlap.h diff --git a/numpy/core/src/private/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src index ce3b34b0e..ce3b34b0e 100644 --- a/numpy/core/src/private/npy_binsearch.h.src +++ b/numpy/core/src/common/npy_binsearch.h.src diff --git a/numpy/core/src/private/npy_cblas.h b/numpy/core/src/common/npy_cblas.h index a083f3bcc..a083f3bcc 100644 --- a/numpy/core/src/private/npy_cblas.h +++ b/numpy/core/src/common/npy_cblas.h diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/common/npy_config.h index 8143e7719..8143e7719 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/common/npy_config.h diff --git a/numpy/core/src/private/npy_extint128.h b/numpy/core/src/common/npy_extint128.h index a887ff317..a887ff317 100644 --- a/numpy/core/src/private/npy_extint128.h +++ b/numpy/core/src/common/npy_extint128.h diff --git a/numpy/core/src/private/npy_fpmath.h b/numpy/core/src/common/npy_fpmath.h index dbb3fb23d..dbb3fb23d 100644 --- a/numpy/core/src/private/npy_fpmath.h +++ b/numpy/core/src/common/npy_fpmath.h diff --git a/numpy/core/src/private/npy_import.h b/numpy/core/src/common/npy_import.h index 221e1e645..221e1e645 100644 --- a/numpy/core/src/private/npy_import.h +++ b/numpy/core/src/common/npy_import.h diff --git a/numpy/core/src/private/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c index 508fbceac..508fbceac 100644 --- a/numpy/core/src/private/npy_longdouble.c +++ b/numpy/core/src/common/npy_longdouble.c diff --git a/numpy/core/src/private/npy_longdouble.h b/numpy/core/src/common/npy_longdouble.h index 036b53070..036b53070 100644 --- a/numpy/core/src/private/npy_longdouble.h +++ b/numpy/core/src/common/npy_longdouble.h diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src index a22cf911c..a22cf911c 100644 --- a/numpy/core/src/private/npy_partition.h.src +++ b/numpy/core/src/common/npy_partition.h.src diff --git 
a/numpy/core/src/private/npy_pycompat.h b/numpy/core/src/common/npy_pycompat.h index aa0b5c122..aa0b5c122 100644 --- a/numpy/core/src/private/npy_pycompat.h +++ b/numpy/core/src/common/npy_pycompat.h diff --git a/numpy/core/src/private/npy_sort.h b/numpy/core/src/common/npy_sort.h index 8c6f05623..8c6f05623 100644 --- a/numpy/core/src/private/npy_sort.h +++ b/numpy/core/src/common/npy_sort.h diff --git a/numpy/core/src/multiarray/python_xerbla.c b/numpy/core/src/common/python_xerbla.c index bdf0b9058..bdf0b9058 100644 --- a/numpy/core/src/multiarray/python_xerbla.c +++ b/numpy/core/src/common/python_xerbla.c diff --git a/numpy/core/src/private/templ_common.h.src b/numpy/core/src/common/templ_common.h.src index a65a00758..a65a00758 100644 --- a/numpy/core/src/private/templ_common.h.src +++ b/numpy/core/src/common/templ_common.h.src diff --git a/numpy/core/src/multiarray/ucsnarrow.c b/numpy/core/src/common/ucsnarrow.c index 8e293e9f2..8e293e9f2 100644 --- a/numpy/core/src/multiarray/ucsnarrow.c +++ b/numpy/core/src/common/ucsnarrow.c diff --git a/numpy/core/src/multiarray/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h index fe31a5e25..fe31a5e25 100644 --- a/numpy/core/src/multiarray/ucsnarrow.h +++ b/numpy/core/src/common/ucsnarrow.h diff --git a/numpy/core/src/private/ufunc_override.c b/numpy/core/src/common/ufunc_override.c index 33b54c665..33b54c665 100644 --- a/numpy/core/src/private/ufunc_override.c +++ b/numpy/core/src/common/ufunc_override.c diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/common/ufunc_override.h index 5b269d270..5b269d270 100644 --- a/numpy/core/src/private/ufunc_override.h +++ b/numpy/core/src/common/ufunc_override.h diff --git a/numpy/core/src/common/umathmodule.h b/numpy/core/src/common/umathmodule.h new file mode 100644 index 000000000..6998596ee --- /dev/null +++ b/numpy/core/src/common/umathmodule.h @@ -0,0 +1,8 @@ +#include "__umath_generated.c" +#include "__ufunc_api.c" + +PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args); +PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)); +int initumath(PyObject *m); + + diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index df9b9cec4..7f837901c 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -3747,6 +3747,27 @@ recursive_find_object_datetime64_type(PyObject *obj, } /* + * handler function for PyDelta values + * which may also be in a 0 dimensional + * NumPy array + */ +static int +delta_checker(PyArray_DatetimeMetaData *meta) +{ + PyArray_DatetimeMetaData tmp_meta; + + tmp_meta.base = NPY_FR_us; + tmp_meta.num = 1; + + /* Combine it with 'meta' */ + if (compute_datetime_metadata_greatest_common_divisor( + meta, &tmp_meta, meta, 0, 0) < 0) { + return -1; + } + return 0; +} + +/* * Recursively determines the metadata for an NPY_TIMEDELTA dtype. * * Returns 0 on success, -1 on failure. 
@@ -3783,6 +3804,28 @@ recursive_find_object_timedelta64_type(PyObject *obj, else if (arr_dtype->type_num != NPY_OBJECT) { return 0; } + else { + if (PyArray_NDIM(arr) == 0) { + /* + * special handling of 0 dimensional NumPy object + * arrays, which may be indexed to retrieve their + * single object using [()], but not by using + * __getitem__(integer) approaches + */ + PyObject *item, *meth, *args; + + meth = PyObject_GetAttrString(obj, "__getitem__"); + args = Py_BuildValue("(())"); + item = PyObject_CallObject(meth, args); + /* + * NOTE: may need other type checks here in the future + * for expanded 0 D datetime array conversions? + */ + if (PyDelta_Check(item)) { + return delta_checker(meta); + } + } + } } /* Datetime scalar -> use its metadata */ else if (PyArray_IsScalar(obj, Timedelta)) { @@ -3803,18 +3846,7 @@ recursive_find_object_timedelta64_type(PyObject *obj, } /* Python timedelta object -> 'us' */ else if (PyDelta_Check(obj)) { - PyArray_DatetimeMetaData tmp_meta; - - tmp_meta.base = NPY_FR_us; - tmp_meta.num = 1; - - /* Combine it with 'meta' */ - if (compute_datetime_metadata_greatest_common_divisor(meta, - &tmp_meta, meta, 0, 0) < 0) { - return -1; - } - - return 0; + return delta_checker(meta); } /* Now check if what we have left is a sequence for recursion */ diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index a0dc98f0e..834a8282b 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -1439,6 +1439,12 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) PyObject *obj2; obj2 = PyUnicode_AsASCIIString(obj); if (obj2 == NULL) { + /* Convert the exception into a TypeError */ + PyObject *err = PyErr_Occurred(); + if (PyErr_GivenExceptionMatches(err, PyExc_UnicodeEncodeError)) { + PyErr_SetString(PyExc_TypeError, + "data type not understood"); + } return NPY_FAIL; } retval = PyArray_DescrConverter(obj2, at); @@ -2401,7 +2407,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) if (ret == NULL) { return NULL; } - mod = PyImport_ImportModule("numpy.core.multiarray"); + mod = PyImport_ImportModule("numpy.core._multiarray_umath"); if (mod == NULL) { Py_DECREF(ret); return NULL; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 2e836d1d0..3d2cce5e1 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1566,7 +1566,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args)) if (ret == NULL) { return NULL; } - mod = PyImport_ImportModule("numpy.core.multiarray"); + mod = PyImport_ImportModule("numpy.core._multiarray_umath"); if (mod == NULL) { Py_DECREF(ret); return NULL; diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index dc571dfcb..8f782cff6 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -19,6 +19,7 @@ #include "structmember.h" #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE #define _MULTIARRAYMODULE #include <numpy/npy_common.h> #include "numpy/arrayobject.h" @@ -54,7 +55,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "ctors.h" #include "array_assign.h" #include "common.h" -#include "ufunc_override.h" #include "multiarraymodule.h" #include "cblasfuncs.h" #include "vdot.h" @@ -67,6 +67,17 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "get_attr_string.h" /* + 
***************************************************************************** + ** INCLUDE GENERATED CODE ** + ***************************************************************************** + */ +#include "funcs.inc" +#include "loops.h" +#include "umathmodule.h" + +NPY_NO_EXPORT int initscalarmath(PyObject *); + +/* * global variable to determine if legacy printing is enabled, accessible from * C. For simplicity the mode is encoded as an integer where '0' means no * legacy mode, and '113' means 1.13 legacy mode. We can upgrade this if we @@ -4365,6 +4376,18 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, METH_VARARGS, NULL}, + /* from umath */ + {"frompyfunc", + (PyCFunction) ufunc_frompyfunc, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"seterrobj", + (PyCFunction) ufunc_seterr, + METH_VARARGS, NULL}, + {"geterrobj", + (PyCFunction) ufunc_geterr, + METH_VARARGS, NULL}, + {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, + METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4382,9 +4405,6 @@ static struct PyMethodDef array_module_methods[] = { static int setup_scalartypes(PyObject *NPY_UNUSED(dict)) { - initialize_casting_tables(); - initialize_numeric_types(); - if (PyType_Ready(&PyBool_Type) < 0) { return -1; } @@ -4624,7 +4644,7 @@ intern_strings(void) #if defined(NPY_PY3K) static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "multiarray", + "_multiarray_umath", NULL, -1, array_module_methods, @@ -4638,10 +4658,10 @@ static struct PyModuleDef moduledef = { /* Initialization function for the module */ #if defined(NPY_PY3K) #define RETVAL(x) x -PyMODINIT_FUNC PyInit_multiarray(void) { +PyMODINIT_FUNC PyInit__multiarray_umath(void) { #else #define RETVAL(x) -PyMODINIT_FUNC initmultiarray(void) { +PyMODINIT_FUNC init_multiarray_umath(void) { #endif PyObject *m, *d, *s; PyObject *c_api; @@ -4650,7 +4670,7 @@ PyMODINIT_FUNC initmultiarray(void) { #if defined(NPY_PY3K) m = PyModule_Create(&moduledef); #else - m = Py_InitModule("multiarray", array_module_methods); + m = Py_InitModule("_multiarray_umath", array_module_methods); #endif if (!m) { goto err; @@ -4684,6 +4704,17 @@ PyMODINIT_FUNC initmultiarray(void) { * static structure slots with functions from the Python C_API. 
*/ PyArray_Type.tp_hash = PyObject_HashNotImplemented; + + /* Load the ufunc operators into the array module's namespace */ + if (InitOperators(d) < 0) { + goto err; + } + + initialize_casting_tables(); + initialize_numeric_types(); + if(initscalarmath(m) < 0) + goto err; + if (PyType_Ready(&PyArray_Type) < 0) { goto err; } @@ -4730,6 +4761,16 @@ PyMODINIT_FUNC initmultiarray(void) { PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); + c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL); + if (c_api == NULL) { + goto err; + } + PyDict_SetItemString(d, "_UFUNC_API", c_api); + Py_DECREF(c_api); + if (PyErr_Occurred()) { + goto err; + } + /* * PyExc_Exception should catch all the standard errors that are * now raised instead of the string exception "multiarray.error" @@ -4806,7 +4847,9 @@ PyMODINIT_FUNC initmultiarray(void) { if (set_typeinfo(d) != 0) { goto err; } - + if (initumath(m) != 0) { + goto err; + } return RETVAL(m); err: diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index a32aa47ab..fdd4d7878 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -538,19 +538,22 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen, } static PyObject * +_void_scalar_repr(PyObject *obj) { + static PyObject *reprfunc = NULL; + npy_cache_import("numpy.core.arrayprint", + "_void_scalar_repr", &reprfunc); + if (reprfunc == NULL) { + return NULL; + } + return PyObject_CallFunction(reprfunc, "O", obj); +} + +static PyObject * voidtype_repr(PyObject *self) { PyVoidScalarObject *s = (PyVoidScalarObject*) self; if (PyDataType_HASFIELDS(s->descr)) { - static PyObject *reprfunc = NULL; - - npy_cache_import("numpy.core.arrayprint", - "_void_scalar_repr", &reprfunc); - if (reprfunc == NULL) { - return NULL; - } - - return PyObject_CallFunction(reprfunc, "O", self); + return _void_scalar_repr(self); } return _void_to_hex(s->obval, s->descr->elsize, "void(b'", "\\x", "')"); } @@ -560,15 +563,7 @@ voidtype_str(PyObject *self) { PyVoidScalarObject *s = (PyVoidScalarObject*) self; if (PyDataType_HASFIELDS(s->descr)) { - static PyObject *reprfunc = NULL; - - npy_cache_import("numpy.core.arrayprint", - "_void_scalar_repr", &reprfunc); - if (reprfunc == NULL) { - return NULL; - } - - return PyObject_CallFunction(reprfunc, "O", self); + return _void_scalar_repr(self); } return _void_to_hex(s->obval, s->descr->elsize, "b'", "\\x", "'"); } @@ -1875,7 +1870,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) } #endif - mod = PyImport_ImportModule("numpy.core.multiarray"); + mod = PyImport_ImportModule("numpy.core._multiarray_umath"); if (mod == NULL) { return NULL; } diff --git a/numpy/core/src/umath/cpuid.c b/numpy/core/src/umath/cpuid.c index 912d51eeb..6744ceb05 100644 --- a/numpy/core/src/umath/cpuid.c +++ b/numpy/core/src/umath/cpuid.c @@ -1,13 +1,11 @@ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include <Python.h> #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "cpuid.h" #define XCR_XFEATURE_ENABLED_MASK 0x0 diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c index 188054e22..aea1815e8 100644 --- a/numpy/core/src/umath/extobj.c +++ b/numpy/core/src/umath/extobj.c @@ -1,13 +1,11 @@ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include <Python.h> #include "npy_config.h" -#define 
PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "npy_pycompat.h" #include "extobj.h" diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 0b02031a7..66b69f555 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1,14 +1,12 @@ /* -*- c -*- */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "numpy/npy_common.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -2226,9 +2224,13 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED { /* */ BINARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; + npy_half in1 = *(npy_half *)ip1; const npy_half in2 = *(npy_half *)ip2; - *((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in1)) ? in1 : in2; + in1 = (@OP@(in1, in2) || npy_half_isnan(in1)) ? in1 : in2; + if (npy_half_isnan(in1)) { + npy_set_floatstatus_invalid(); + } + *((npy_half *)op1) = in1; } } /**end repeat**/ @@ -2784,20 +2786,20 @@ NPY_NO_EXPORT void @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { - const @ftype@ in1r = ((@ftype@ *)ip1)[0]; - const @ftype@ in1i = ((@ftype@ *)ip1)[1]; + @ftype@ in1r = ((@ftype@ *)ip1)[0]; + @ftype@ in1i = ((@ftype@ *)ip1)[1]; const @ftype@ in2r = ((@ftype@ *)ip2)[0]; const @ftype@ in2i = ((@ftype@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i)) { - ((@ftype@ *)op1)[0] = in1r; - ((@ftype@ *)op1)[1] = in1i; + if ( !(@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i))) { + in1r = in2r; + in1i = in2i; } - else { - ((@ftype@ *)op1)[0] = in2r; - ((@ftype@ *)op1)[1] = in2i; + if (npy_isnan(in1r) || npy_isnan(in1i)) { + npy_set_floatstatus_invalid(); } + ((@ftype@ *)op1)[0] = in1r; + ((@ftype@ *)op1)[1] = in1i; } - npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 8136d7b3f..6d04ce372 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -7,15 +7,13 @@ * See LICENSE.txt for the license. 
*/ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define PY_SSIZE_T_CLEAN #include <Python.h> #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include <numpy/arrayobject.h> #include "npy_config.h" diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 3e29c4b4e..e98d9f865 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -7,13 +7,11 @@ */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 20c448d8b..459b0a594 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -24,15 +24,13 @@ * */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "npy_pycompat.h" #include "numpy/arrayobject.h" diff --git a/numpy/core/src/umath/ufunc_object.h b/numpy/core/src/umath/ufunc_object.h index 5438270f1..f5de9f9b7 100644 --- a/numpy/core/src/umath/ufunc_object.h +++ b/numpy/core/src/umath/ufunc_object.h @@ -1,6 +1,8 @@ #ifndef _NPY_UMATH_UFUNC_OBJECT_H_ #define _NPY_UMATH_UFUNC_OBJECT_H_ +#include <numpy/ufuncobject.h> + NPY_NO_EXPORT PyObject * ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args); diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 1766ba564..807b03512 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -9,14 +9,12 @@ * See LICENSE.txt for the license. 
*/ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY - #include "npy_pycompat.h" #include "numpy/ufuncobject.h" diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 9291a5138..20bd2b0a8 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -16,12 +16,12 @@ * __ufunc_api.c */ #define _UMATHMODULE +#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -30,20 +30,6 @@ #include "numpy/npy_math.h" -/* - ***************************************************************************** - ** INCLUDE GENERATED CODE ** - ***************************************************************************** - */ -#include "funcs.inc" -#include "loops.h" -#include "ufunc_object.h" -#include "ufunc_type_resolution.h" -#include "__umath_generated.c" -#include "__ufunc_api.c" - -NPY_NO_EXPORT int initscalarmath(PyObject *); - static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; static int @@ -82,7 +68,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc, return 0; } -static PyObject * +PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { /* Keywords are ignored for now */ @@ -179,7 +165,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS } /* docstring in numpy.add_newdocs.py */ -static PyObject * +PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyUFuncObject *ufunc; @@ -270,97 +256,24 @@ intern_strings(void) npy_um_str_array_wrap && npy_um_str_array_finalize && npy_um_str_ufunc; } -/* Setup the umath module */ -/* Remove for time being, it is declared in __ufunc_api.h */ -/*static PyTypeObject PyUFunc_Type;*/ - -static struct PyMethodDef methods[] = { - {"frompyfunc", - (PyCFunction) ufunc_frompyfunc, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"seterrobj", - (PyCFunction) ufunc_seterr, - METH_VARARGS, NULL}, - {"geterrobj", - (PyCFunction) ufunc_geterr, - METH_VARARGS, NULL}, - {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "umath", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#include <stdio.h> +/* Setup the umath part of the module */ -#if defined(NPY_PY3K) -#define RETVAL(x) x -PyMODINIT_FUNC PyInit_umath(void) -#else -#define RETVAL(x) -PyMODINIT_FUNC initumath(void) -#endif +int initumath(PyObject *m) { - PyObject *m, *d, *s, *s2, *c_api; + PyObject *d, *s, *s2; int UFUNC_FLOATING_POINT_SUPPORT = 1; #ifdef NO_UFUNC_FLOATING_POINT_SUPPORT UFUNC_FLOATING_POINT_SUPPORT = 0; #endif - /* Create the module and add the functions */ -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("umath", methods); -#endif - if (!m) { - goto err; - } - - /* Import the array */ - if (_import_array() < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, - "umath failed: Could not import array core."); - } - goto err; - } /* Initialize the types */ if (PyType_Ready(&PyUFunc_Type) < 0) - goto err; + return -1; /* Add some symbolic constants to the 
module */ d = PyModule_GetDict(m); - c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL); - if (PyErr_Occurred()) { - goto err; - } - PyDict_SetItemString(d, "_UFUNC_API", c_api); - Py_DECREF(c_api); - if (PyErr_Occurred()) { - goto err; - } - - /* Load the ufunc operators into the array module's namespace */ - if (InitOperators(d) < 0) { - goto err; - } - PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI)); Py_DECREF(s); PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(NPY_E)); @@ -417,19 +330,11 @@ PyMODINIT_FUNC initumath(void) PyDict_SetItemString(d, "conj", s); PyDict_SetItemString(d, "mod", s2); - initscalarmath(m); - if (!intern_strings()) { - goto err; - } - - return RETVAL(m); - - err: - /* Check for errors */ - if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, - "cannot load umath module."); + "cannot intern umath strings while initializing _multiarray_umath."); + return -1; } - return RETVAL(NULL); + + return 0; } diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 8e058d5fb..a5e1f73ce 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -355,6 +355,16 @@ class TestDateTime(object): actual = np.array(inputs, dtype='timedelta64[D]') assert_equal(expected, actual) + def test_timedelta_0_dim_object_array_conversion(self): + # Regression test for gh-11151 + test = np.array(datetime.timedelta(seconds=20)) + actual = test.astype(np.timedelta64) + # expected value from the array constructor workaround + # described in above issue + expected = np.array(datetime.timedelta(seconds=20), + np.timedelta64) + assert_equal(actual, expected) + def test_timedelta_scalar_construction_units(self): # String construction detecting units assert_equal(np.datetime64('2010').dtype, diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 31ef9d609..e0205a467 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -729,6 +729,7 @@ def test_dtypes_are_true(): def test_invalid_dtype_string(): # test for gh-10440 assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]') + assert_raises(TypeError, np.dtype, u'Fl\xfcgel') class TestFromCTypes(object): diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py index b02f6cae2..d715569f8 100644 --- a/numpy/core/tests/test_half.py +++ b/numpy/core/tests/test_half.py @@ -5,7 +5,7 @@ import pytest import numpy as np from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal +from numpy.testing import assert_, assert_equal, suppress_warnings def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -301,13 +301,19 @@ class TestHalf(object): assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) - x = np.maximum(b, c) - assert_(np.isnan(x[3])) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + assert_equal(len(sup.log), 1) x[3] = 0 assert_equal(x, [0, 5, 1, 0, 6]) assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) - x = np.minimum(b, c) - assert_(np.isnan(x[3])) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + assert_equal(len(sup.log), 1) x[3] = 0 assert_equal(x, [-2, -1, -np.inf, 0, 3]) assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 8244dfe20..c38625dac 100644 --- 
a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -1557,7 +1557,10 @@ class TestRegression(object): def test_complex_nan_maximum(self): cnan = complex(0, np.nan) - assert_equal(np.maximum(1, cnan), cnan) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_equal(np.maximum(1, cnan), cnan) + assert_equal(len(sup.log), 1) def test_subclass_int_tuple_assignment(self): # ticket #1563 diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index d4bdb3d4e..c15ce83f6 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1332,10 +1332,11 @@ class TestMinMax(object): # and put it before the call to an intrisic function that causes # invalid status to be set. Also make sure warnings are emitted for n in (2, 4, 8, 16, 32): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - for r in np.diagflat([np.nan] * n): - assert_equal(np.min(r), np.nan) + for dt in (np.float32, np.float16, np.complex64): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): + assert_equal(np.min(r), np.nan) assert_equal(len(sup.log), n) def test_minimize_warns(self): diff --git a/numpy/core/umath.py b/numpy/core/umath.py new file mode 100644 index 000000000..a0e8ad427 --- /dev/null +++ b/numpy/core/umath.py @@ -0,0 +1,35 @@ +""" +Create the numpy.core.umath namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. + +""" + +from . import _multiarray_umath +from numpy.core._multiarray_umath import * +from numpy.core._multiarray_umath import ( + _UFUNC_API, _add_newdoc_ufunc, _arg, _ones_like + ) + +__all__ = [ + '_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG', + 'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT', + 'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN', + 'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID', + 'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT', + 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', '_arg', 'absolute', 'add', + 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj', + 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide', + 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs', + 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp', + 'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside', + 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', + 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', + 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative', + 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', + 'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign', + 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', + 'tanh', 'true_divide', 'trunc'] diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index dc40ac67b..c1757150e 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -26,7 +26,7 @@ from .financial import * from .arrayterator import Arrayterator from .arraypad import * from ._version 
import * -from numpy.core.multiarray import tracemalloc_domain +from numpy.core._multiarray_umath import tracemalloc_domain __all__ = ['emath', 'math', 'tracemalloc_domain'] __all__ += type_check.__all__ diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index d84455a8f..62e9b6d50 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -82,6 +82,11 @@ def ediff1d(ary, to_end=None, to_begin=None): # force a 1d array ary = np.asanyarray(ary).ravel() + # we have unit tests enforcing + # propagation of the dtype of input + # ary to returned result + dtype_req = ary.dtype + # fast track default case if to_begin is None and to_end is None: return ary[1:] - ary[:-1] @@ -89,13 +94,23 @@ def ediff1d(ary, to_end=None, to_begin=None): if to_begin is None: l_begin = 0 else: - to_begin = np.asanyarray(to_begin).ravel() + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req): + raise TypeError("dtype of to_begin must be compatible " + "with input ary") + + to_begin = to_begin.ravel() l_begin = len(to_begin) if to_end is None: l_end = 0 else: - to_end = np.asanyarray(to_end).ravel() + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req): + raise TypeError("dtype of to_end must be compatible " + "with input ary") + + to_end = to_end.ravel() l_end = len(to_end) # do the calculation in place and copy to_begin and to_end diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index c76afb8e5..4b61726d2 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -6,10 +6,13 @@ from __future__ import division, absolute_import, print_function import numpy as np import sys -from numpy.testing import assert_array_equal, assert_equal, assert_raises +from numpy.testing import (assert_array_equal, assert_equal, + assert_raises, assert_raises_regex) from numpy.lib.arraysetops import ( ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin ) +import pytest + class TestSetOps(object): @@ -125,6 +128,68 @@ class TestSetOps(object): assert_array_equal([7,1], ediff1d(two_elem, to_begin=7)) assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6])) + @pytest.mark.parametrize("ary, prepend, append", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan), + # should fail because attempting + # to downcast to smaller int type: + (np.array([1, 2, 3], dtype=np.int32), + np.array([5, 7, 2], dtype=np.int64), + None), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary): + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = 'must be compatible' + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize("ary," + "prepend," + "append," + "expected", [ + (np.array([1, 2, 3], dtype=np.int16), + 0, + None, + np.array([0, 1, 1], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ]) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + 
append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + + def test_isin(self): # the tests for in1d cover most of isin's behavior # if in1d is removed, would need to change those tests to test diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index dc72e7661..c28e77e69 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -317,13 +317,6 @@ class ABCPolyBase(object): ) needs_parens = True - # filter out uninteresting coefficients - filtered_coeffs = [ - (i, c) - for i, c in enumerate(self.coef) - # if not (c == 0) # handle NaN - ] - mute = r"\color{{LightGray}}{{{}}}".format parts = [] @@ -418,7 +411,7 @@ class ABCPolyBase(object): return self.__class__(coef, self.domain, self.window) def __div__(self, other): - # set to __floordiv__, /, for now. + # this can be removed when python 2 support is dropped. return self.__floordiv__(other) def __truediv__(self, other): diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index aa2b5b5ea..f14ed988d 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -21,9 +21,10 @@ Arithmetic ---------- - `chebadd` -- add two Chebyshev series. - `chebsub` -- subtract one Chebyshev series from another. +- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``. - `chebmul` -- multiply two Chebyshev series. - `chebdiv` -- divide one Chebyshev series by another. -- `chebpow` -- raise a Chebyshev series to an positive integer power +- `chebpow` -- raise a Chebyshev series to a positive integer power. - `chebval` -- evaluate a Chebyshev series at given points. - `chebval2d` -- evaluate a 2D Chebyshev series at given points. - `chebval3d` -- evaluate a 3D Chebyshev series at given points. @@ -579,7 +580,7 @@ def chebadd(c1, c2): See Also -------- - chebsub, chebmul, chebdiv, chebpow + chebsub, chebmulx, chebmul, chebdiv, chebpow Notes ----- @@ -629,7 +630,7 @@ def chebsub(c1, c2): See Also -------- - chebadd, chebmul, chebdiv, chebpow + chebadd, chebmulx, chebmul, chebdiv, chebpow Notes ----- @@ -684,6 +685,12 @@ def chebmulx(c): .. versionadded:: 1.5.0 + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([ 1., 2.5, 3., 1.5, 2.]) + """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -722,7 +729,7 @@ def chebmul(c1, c2): See Also -------- - chebadd, chebsub, chebdiv, chebpow + chebadd, chebsub, chebmulx, chebdiv, chebpow Notes ----- @@ -773,7 +780,7 @@ def chebdiv(c1, c2): See Also -------- - chebadd, chebsub, chebmul, chebpow + chebadd, chebsub, chemulx, chebmul, chebpow Notes ----- @@ -841,10 +848,13 @@ def chebpow(c, pow, maxpower=16): See Also -------- - chebadd, chebsub, chebmul, chebdiv + chebadd, chebsub, chebmulx, chebmul, chebdiv Examples -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , 14. , 12.5, 12. , 8. ]) """ # c is a trimmed copy diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 8c33ee863..2aed4b34f 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -16,11 +16,12 @@ Constants Arithmetic ---------- -- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. - `hermadd` -- add two Hermite series. - `hermsub` -- subtract one Hermite series from another. 
+- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. - `hermmul` -- multiply two Hermite series. - `hermdiv` -- divide one Hermite series by another. +- `hermpow` -- raise a Hermite series to a positive integer power. - `hermval` -- evaluate a Hermite series at given points. - `hermval2d` -- evaluate a 2D Hermite series at given points. - `hermval3d` -- evaluate a 3D Hermite series at given points. @@ -323,7 +324,7 @@ def hermadd(c1, c2): See Also -------- - hermsub, hermmul, hermdiv, hermpow + hermsub, hermmulx, hermmul, hermdiv, hermpow Notes ----- @@ -371,7 +372,7 @@ def hermsub(c1, c2): See Also -------- - hermadd, hermmul, hermdiv, hermpow + hermadd, hermmulx, hermmul, hermdiv, hermpow Notes ----- @@ -417,6 +418,10 @@ def hermmulx(c): out : ndarray Array representing the result of the multiplication. + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + Notes ----- The multiplication uses the recursion relationship for Hermite @@ -469,7 +474,7 @@ def hermmul(c1, c2): See Also -------- - hermadd, hermsub, hermdiv, hermpow + hermadd, hermsub, hermmulx, hermdiv, hermpow Notes ----- @@ -537,7 +542,7 @@ def hermdiv(c1, c2): See Also -------- - hermadd, hermsub, hermmul, hermpow + hermadd, hermsub, hermmulx, hermmul, hermpow Notes ----- @@ -606,7 +611,7 @@ def hermpow(c, pow, maxpower=16): See Also -------- - hermadd, hermsub, hermmul, hermdiv + hermadd, hermsub, hermmulx, hermmul, hermdiv Examples -------- diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 6166c03fd..d4520ad6c 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -16,11 +16,12 @@ Constants Arithmetic ---------- -- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. - `hermeadd` -- add two Hermite_e series. - `hermesub` -- subtract one Hermite_e series from another. +- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. - `hermemul` -- multiply two Hermite_e series. - `hermediv` -- divide one Hermite_e series by another. +- `hermepow` -- raise a Hermite_e series to a positive integer power. - `hermeval` -- evaluate a Hermite_e series at given points. - `hermeval2d` -- evaluate a 2D Hermite_e series at given points. - `hermeval3d` -- evaluate a 3D Hermite_e series at given points. @@ -324,7 +325,7 @@ def hermeadd(c1, c2): See Also -------- - hermesub, hermemul, hermediv, hermepow + hermesub, hermemulx, hermemul, hermediv, hermepow Notes ----- @@ -372,7 +373,7 @@ def hermesub(c1, c2): See Also -------- - hermeadd, hermemul, hermediv, hermepow + hermeadd, hermemulx, hermemul, hermediv, hermepow Notes ----- @@ -470,7 +471,7 @@ def hermemul(c1, c2): See Also -------- - hermeadd, hermesub, hermediv, hermepow + hermeadd, hermesub, hermemulx, hermediv, hermepow Notes ----- @@ -538,7 +539,7 @@ def hermediv(c1, c2): See Also -------- - hermeadd, hermesub, hermemul, hermepow + hermeadd, hermesub, hermemulx, hermemul, hermepow Notes ----- @@ -605,7 +606,7 @@ def hermepow(c, pow, maxpower=16): See Also -------- - hermeadd, hermesub, hermemul, hermediv + hermeadd, hermesub, hermemulx, hermemul, hermediv Examples -------- diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 0e4554071..a116d20a7 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -16,11 +16,12 @@ Constants Arithmetic ---------- -- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. - `lagadd` -- add two Laguerre series. - `lagsub` -- subtract one Laguerre series from another. 
+- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. - `lagmul` -- multiply two Laguerre series. - `lagdiv` -- divide one Laguerre series by another. +- `lagpow` -- raise a Laguerre series to a positive integer power. - `lagval` -- evaluate a Laguerre series at given points. - `lagval2d` -- evaluate a 2D Laguerre series at given points. - `lagval3d` -- evaluate a 3D Laguerre series at given points. @@ -320,7 +321,7 @@ def lagadd(c1, c2): See Also -------- - lagsub, lagmul, lagdiv, lagpow + lagsub, lagmulx, lagmul, lagdiv, lagpow Notes ----- @@ -369,7 +370,7 @@ def lagsub(c1, c2): See Also -------- - lagadd, lagmul, lagdiv, lagpow + lagadd, lagmulx, lagmul, lagdiv, lagpow Notes ----- @@ -415,6 +416,10 @@ def lagmulx(c): out : ndarray Array representing the result of the multiplication. + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + Notes ----- The multiplication uses the recursion relationship for Laguerre @@ -468,7 +473,7 @@ def lagmul(c1, c2): See Also -------- - lagadd, lagsub, lagdiv, lagpow + lagadd, lagsub, lagmulx, lagdiv, lagpow Notes ----- @@ -536,7 +541,7 @@ def lagdiv(c1, c2): See Also -------- - lagadd, lagsub, lagmul, lagpow + lagadd, lagsub, lagmulx, lagmul, lagpow Notes ----- @@ -603,7 +608,7 @@ def lagpow(c, pow, maxpower=16): See Also -------- - lagadd, lagsub, lagmul, lagdiv + lagadd, lagsub, lagmulx, lagmul, lagdiv Examples -------- diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 0a20707e6..e9c24594b 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -27,12 +27,12 @@ Arithmetic .. autosummary:: :toctree: generated/ - legmulx multiply a Legendre series in P_i(x) by x. legadd add two Legendre series. legsub subtract one Legendre series from another. + legmulx multiply a Legendre series in ``P_i(x)`` by ``x``. legmul multiply two Legendre series. legdiv divide one Legendre series by another. - legpow raise a Legendre series to an positive integer power + legpow raise a Legendre series to a positive integer power. legval evaluate a Legendre series at given points. legval2d evaluate a 2D Legendre series at given points. legval3d evaluate a 3D Legendre series at given points. @@ -351,7 +351,7 @@ def legadd(c1, c2): See Also -------- - legsub, legmul, legdiv, legpow + legsub, legmulx, legmul, legdiv, legpow Notes ----- @@ -401,7 +401,7 @@ def legsub(c1, c2): See Also -------- - legadd, legmul, legdiv, legpow + legadd, legmulx, legmul, legdiv, legpow Notes ----- @@ -451,6 +451,10 @@ def legmulx(c): out : ndarray Array representing the result of the multiplication. 
+ See Also + -------- + legadd, legmul, legmul, legdiv, legpow + Notes ----- The multiplication uses the recursion relationship for Legendre @@ -460,6 +464,12 @@ def legmulx(c): xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) + """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -500,7 +510,7 @@ def legmul(c1, c2): See Also -------- - legadd, legsub, legdiv, legpow + legadd, legsub, legmulx, legdiv, legpow Notes ----- @@ -570,7 +580,7 @@ def legdiv(c1, c2): See Also -------- - legadd, legsub, legmul, legpow + legadd, legsub, legmulx, legmul, legpow Notes ----- @@ -640,7 +650,7 @@ def legpow(c, pow, maxpower=16): See Also -------- - legadd, legsub, legmul, legdiv + legadd, legsub, legmulx, legmul, legdiv Examples -------- diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 7c43e658a..259cd31f5 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -18,9 +18,10 @@ Arithmetic ---------- - `polyadd` -- add two polynomials. - `polysub` -- subtract one polynomial from another. +- `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``. - `polymul` -- multiply two polynomials. - `polydiv` -- divide one polynomial by another. -- `polypow` -- raise a polynomial to an positive integer power +- `polypow` -- raise a polynomial to a positive integer power. - `polyval` -- evaluate a polynomial at given points. - `polyval2d` -- evaluate a 2D polynomial at given points. - `polyval3d` -- evaluate a 3D polynomial at given points. @@ -224,7 +225,7 @@ def polyadd(c1, c2): See Also -------- - polysub, polymul, polydiv, polypow + polysub, polymulx, polymul, polydiv, polypow Examples -------- @@ -269,7 +270,7 @@ def polysub(c1, c2): See Also -------- - polyadd, polymul, polydiv, polypow + polyadd, polymulx, polymul, polydiv, polypow Examples -------- @@ -312,6 +313,10 @@ def polymulx(c): out : ndarray Array representing the result of the multiplication. 
+ See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + Notes ----- @@ -351,7 +356,7 @@ def polymul(c1, c2): See Also -------- - polyadd, polysub, polydiv, polypow + polyadd, polysub, polymulx, polydiv, polypow Examples -------- @@ -388,7 +393,7 @@ def polydiv(c1, c2): See Also -------- - polyadd, polysub, polymul, polypow + polyadd, polysub, polymulx, polymul, polypow Examples -------- @@ -450,10 +455,13 @@ def polypow(c, pow, maxpower=None): See Also -------- - polyadd, polysub, polymul, polydiv + polyadd, polysub, polymulx, polymul, polydiv Examples -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1,2,3], 2) + array([ 1., 4., 10., 12., 9.]) """ # c is a trimmed copy diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 439dfa08d..7fb7492c6 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval @@ -111,6 +113,15 @@ class TestArithmetic(object): res = cheb.chebadd(cheb.chebmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 18c26af8f..1287ef3fe 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval @@ -99,6 +101,15 @@ class TestArithmetic(object): res = herm.hermadd(herm.hermmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 58d74dae9..ccb44ad73 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval @@ -99,6 +101,15 @@ class TestArithmetic(object): res = herme.hermeadd(herme.hermemul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_laguerre.py 
b/numpy/polynomial/tests/test_laguerre.py index 3cb630e46..3ababec5e 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval @@ -96,6 +98,15 @@ class TestArithmetic(object): res = lag.lagadd(lag.lagmul(quo, ci), rem) assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index aeecd8775..a23086d59 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval @@ -100,6 +102,15 @@ class TestArithmetic(object): res = leg.legadd(leg.legmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c]*j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 67728e35e..0c93be278 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -3,6 +3,8 @@ """ from __future__ import division, absolute_import, print_function +from functools import reduce + import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( @@ -102,6 +104,15 @@ class TestArithmetic(object): res = poly.polyadd(poly.polymul(quo, ci), rem) assert_equal(res, tgt, err_msg=msg) + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c]*j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 62793a9d6..675f8d242 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -12,11 +12,11 @@ try: cdll = None if hasattr(sys, 'gettotalrefcount'): try: - cdll = load_library('multiarray_d', np.core.multiarray.__file__) + cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__) except OSError: pass if cdll is None: - cdll = load_library('multiarray', np.core.multiarray.__file__) + cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__) _HAS_CTYPE = True except ImportError: _HAS_CTYPE = False @@ -30,7 +30,7 @@ class TestLoadLibrary(object): def test_basic(self): try: # Should succeed - load_library('multiarray', np.core.multiarray.__file__) + load_library('_multiarray_umath', np.core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes 
is not available on this python: skipping the test" " (import error was: %s)" % str(e)) @@ -43,7 +43,7 @@ class TestLoadLibrary(object): try: so = get_shared_lib_extension(is_python_ext=True) # Should succeed - load_library('multiarray%s' % so, np.core.multiarray.__file__) + load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__) except ImportError: print("No distutils available, skipping test.") except ImportError as e: diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh index 1671d35b4..1b8c370d3 100755 --- a/tools/travis-before-install.sh +++ b/tools/travis-before-install.sh @@ -9,12 +9,7 @@ pushd builds # Build into own virtualenv # We therefore control our own environment, avoid travis' numpy -# -# Some change in virtualenv 14.0.5 caused `test_f2py` to fail. So, we have -# pinned `virtualenv` to the last known working version to avoid this failure. -# Appears we had some issues with certificates on Travis. It looks like -# bumping to 14.0.6 will help. -pip install -U 'virtualenv==14.0.6' +pip install -U virtualenv if [ -n "$USE_DEBUG" ] then diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 97b192813..d95805cee 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -46,6 +46,8 @@ setup_base() if [ -z "$USE_DEBUG" ]; then $PIP install -v . 2>&1 | tee log else + # Python3.5-dbg on travis seems to need this + export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized" $PYTHON setup.py build_ext --inplace 2>&1 | tee log fi grep -v "_configtest" log \ |
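
A minimal illustrative sketch (not part of the diff itself) of what the multiarray/umath merge above means for downstream code: numpy/core/umath.py becomes a thin Python shim that re-exports from the single numpy.core._multiarray_umath extension, and the reduce/pickle paths now import that module. Assumes a NumPy build that already contains this change (1.16 or later).

    import pickle
    import numpy as np
    import numpy.core.umath as umath
    import numpy.core._multiarray_umath as mau

    # umath.py re-exports everything from the merged extension module,
    # so the ufuncs resolve to the very same objects under both names.
    assert umath.add is mau.add
    assert umath.frompyfunc is mau.frompyfunc

    # dtype/array __reduce__ now references numpy.core._multiarray_umath
    # (see the PyImport_ImportModule changes above); pickles still round-trip.
    assert pickle.loads(pickle.dumps(np.dtype('f8'))) == np.dtype('f8')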

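Likewise, a hedged example (mine, not taken from the test suite above) of the stricter casting rule ediff1d now enforces: to_begin/to_end must be castable to the dtype of ary, so appending np.nan to an integer input raises TypeError instead of silently casting the value into the integer result.

    import numpy as np

    a = np.array([1, 2, 3], dtype=np.int64)

    # Scalars that fit the input dtype keep working, and the dtype is preserved:
    print(np.ediff1d(a, to_begin=0, to_end=-9))   # -> [ 0  1  1 -9] (int64)

    # A float NaN cannot be safely cast to int64, so this now raises:
    try:
        np.ediff1d(a, to_end=np.nan)
    except TypeError as exc:
        print(exc)                                # dtype of to_end must be compatible ...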