213 files changed, 7692 insertions, 4596 deletions
diff --git a/.gitignore b/.gitignore index abba1fdc0..3952be1fe 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,12 @@ *.tmp *.vim tags +cscope.out +# gnu global +GPATH +GRTAGS +GSYMS +GTAGS # Compiled source # ################### @@ -123,7 +129,7 @@ numpy/core/src/private/npy_partition.h numpy/core/src/private/scalarmathmodule.h numpy/core/src/scalarmathmodule.c numpy/core/src/umath/funcs.inc -numpy/core/src/umath/loops.c +numpy/core/src/umath/loops.[ch] numpy/core/src/umath/operand_flag_tests.c numpy/core/src/umath/simd.inc numpy/core/src/umath/struct_ufunc_test.c diff --git a/.travis.yml b/.travis.yml index 12a443d41..e0355aa6a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,12 +14,8 @@ matrix: env: USE_DEBUG=1 - python: 2.7 env: NPY_SEPARATE_COMPILATION=0 PYTHON_OO=1 - - python: 3.3 - env: NPY_SEPARATE_COMPILATION=0 - - python: 2.7 - env: NPY_RELAXED_STRIDES_CHECKING=1 - - python: 3.3 - env: NPY_RELAXED_STRIDES_CHECKING=1 + - python: 3.4 + env: NPY_RELAXED_STRIDES_CHECKING=0 - python: 2.7 env: USE_BENTO=1 - python: 2.7 diff --git a/INSTALL.txt b/INSTALL.txt index 278aab9ef..9c12ba602 100644 --- a/INSTALL.txt +++ b/INSTALL.txt @@ -35,6 +35,21 @@ Building NumPy requires the following software installed: Python__ http://www.python.org nose__ http://somethingaboutorange.com/mrl/projects/nose/ +Basic Installation +================== + +To install numpy, run: + + python setup.py build -j 4 install --prefix $HOME/.local + +This will compile numpy on 4 CPUs and install it into the specified prefix. +To perform an in-place build that can be run from the source folder, run: + + python setup.py build_ext --inplace -j 4 + +The number of build jobs can also be specified via the environment variable +NPY_NUM_BUILD_JOBS. + Fortran ABI mismatch ==================== @@ -65,40 +80,34 @@ this means that g77 has been used. If libgfortran.so is a dependency, gfortran has been used. If both are dependencies, this means both have been used, which is almost always a very bad idea. -Building with ATLAS support -=========================== - -Ubuntu 8.10 (Intrepid) ----------------------- - -You can install the necessary packages for optimized ATLAS with this -command: - - sudo apt-get install libatlas-base-dev - -If you have a recent CPU with SIMD support (SSE, SSE2, etc...), you should -also install the corresponding package for optimal performance. For -example, for SSE2: +Building with optimized BLAS support +==================================== - sudo apt-get install libatlas3gf-sse2 +Ubuntu/Debian +------------- -*NOTE*: if you build your own atlas, Intrepid changed its default fortran -compiler to gfortran. So you should rebuild everything from scratch, -including lapack, to use it on Intrepid. +In order to build with an optimized BLAS, a development package providing one +must be installed. Options include, for example: -Ubuntu 8.04 and lower ---------------------- + - libblas-dev + the reference BLAS, not very optimized + - libatlas-base-dev + generically tuned ATLAS; it is recommended to tune it to the available hardware, + see /usr/share/doc/libatlas3-base/README.Debian for instructions + - libopenblas-base + fast and runtime-detected, so no tuning is required, but as of version 2.11 it still + suffers from correctness issues on some CPUs; test your applications + thoroughly.
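A quick way to verify which BLAS an installed numpy actually linked against, and to sanity-check its speed after switching implementations, is a sketch along these lines (``numpy.show_config()`` is a real numpy API; the matrix size and the use of ``time.time`` are arbitrary choices for illustration)::

    import time
    import numpy as np

    # Print the BLAS/LAPACK libraries this numpy build was linked against.
    np.show_config()

    # Rough speed check: an optimized BLAS is typically many times faster
    # than the reference implementation on a large matrix product.
    a = np.random.rand(1000, 1000)
    b = np.random.rand(1000, 1000)
    start = time.time()
    np.dot(a, b)
    print("1000x1000 matrix product: %.3f seconds" % (time.time() - start))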
-You can install the necessary packages for optimized ATLAS with this -command: +The actual implementation can also be exchanged after installation via the +alternatives mechanism: - sudo apt-get install atlas3-base-dev + update-alternatives --config libblas.so.3 + update-alternatives --config liblapack.so.3 -If you have a recent CPU with SIMD support (SSE, SSE2, etc...), you should -also install the corresponding package for optimal performance. For -example, for SSE2: +Or by preloading a specific BLAS library with + LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - sudo apt-get install atlas3-sse2 Windows 64 bits notes ===================== diff --git a/doc/HOWTO_DOCUMENT.rst.txt b/doc/HOWTO_DOCUMENT.rst.txt index 2854b6b90..eac97512a 100644 --- a/doc/HOWTO_DOCUMENT.rst.txt +++ b/doc/HOWTO_DOCUMENT.rst.txt @@ -30,14 +30,14 @@ A Guide to NumPy/SciPy Documentation Overview -------- -In general, we follow the standard Python style conventions as described here: - * `Style Guide for C Code <http://www.python.org/peps/pep-0007.html>`_ - * `Style Guide for Python Code <http://www.python.org/peps/pep-0008.html>`_ - * `Docstring Conventions <http://www.python.org/peps/pep-0257.html>`_ +We mostly follow the standard Python style conventions as described here: + * `Style Guide for C Code <http://python.org/dev/peps/pep-0007/>`_ + * `Style Guide for Python Code <http://python.org/dev/peps/pep-0008/>`_ + * `Docstring Conventions <http://python.org/dev/peps/pep-0257/>`_ Additional PEPs of interest regarding documentation of code: - * `Docstring Processing Framework <http://www.python.org/peps/pep-0256.html>`_ - * `Docutils Design Specification <http://www.python.org/peps/pep-0258.html>`_ + * `Docstring Processing Framework <http://python.org/dev/peps/pep-0256/>`_ + * `Docutils Design Specification <http://python.org/dev/peps/pep-0258/>`_ Use a code checker: * `pylint <http://www.logilab.org/857>`_ @@ -597,7 +597,8 @@ Common reST concepts For paragraphs, indentation is significant and indicates indentation in the output. New paragraphs are marked with a blank line. -Use *italics*, **bold**, and ``monospace`` if needed in any explanations +Use ``*italics*``, ``**bold**`` and ````monospace```` if needed in any +explanations (but not for variable names and doctest code or multi-line code). Variable, module, function, and class names should be written between single back-ticks (```numpy```). diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index 98e7aac8f..61bf71da0 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -113,7 +113,7 @@ Wine For building Windows binaries on OS X Wine can be used. In Wine the following needs to be installed: -* Python 2.5-2.7 and 3.1-3.2 +* Python 2.6-2.7 and 3.2 * MakeNsis * CpuId plugin for MakeNsis : this can be found in the NumPy source tree under tools/win32build/cpucaps and has to be built with MinGW (see SConstruct file in diff --git a/doc/neps/return-of-revenge-of-matmul-pep.rst b/doc/neps/return-of-revenge-of-matmul-pep.rst new file mode 100644 index 000000000..b19f07d85 --- /dev/null +++ b/doc/neps/return-of-revenge-of-matmul-pep.rst @@ -0,0 +1,1380 @@ +PEP: 465 +Title: A dedicated infix operator for matrix multiplication +Version: $Revision$ +Last-Modified: $Date$ +Author: Nathaniel J.
Smith <njs@pobox.com> +Status: Draft +Type: Standards Track +Content-Type: text/x-rst +Created: 20-Feb-2014 +Python-Version: 3.5 +Post-History: 13-Mar-2014 + +Abstract +======== + +This PEP proposes a new binary operator to be used for matrix +multiplication, called ``@``. (Mnemonic: ``@`` is ``*`` for +mATrices.) + + +Specification +============= + +A new binary operator is added to the Python language, together +with the corresponding in-place version:
+
+======= ========================= ===============================
+ Op      Precedence/associativity  Methods
+======= ========================= ===============================
+``@``    Same as ``*``             ``__matmul__``, ``__rmatmul__``
+``@=``   n/a                       ``__imatmul__``
+======= ========================= ===============================
+
+No implementations of these methods are added to the builtin or +standard library types. However, a number of projects have reached +consensus on the recommended semantics for these operations; see +`Intended usage details`_ below for details. + +For details on how this operator will be implemented in CPython, see +`Implementation details`_. + + +Motivation +========== + +Executive summary +----------------- + +In numerical code, there are two important operations which compete +for use of Python's ``*`` operator: elementwise multiplication, and +matrix multiplication. In the nearly twenty years since the Numeric +library was first proposed, there have been many attempts to resolve +this tension [#hugunin]_; none have been really satisfactory. +Currently, most numerical Python code uses ``*`` for elementwise +multiplication, and function/method syntax for matrix multiplication; +however, this leads to ugly and unreadable code in common +circumstances. The problem is bad enough that significant amounts of +code continue to use the opposite convention (which has the virtue of +producing ugly and unreadable code in *different* circumstances), and +this API fragmentation across codebases then creates yet more +problems. There does not seem to be any *good* solution to the +problem of designing a numerical API within current Python syntax -- +only a landscape of options that are bad in different ways. The +minimal change to Python syntax which is sufficient to resolve these +problems is the addition of a single new infix operator for matrix +multiplication. + +Matrix multiplication has a singular combination of features which +distinguish it from other binary operations, which together provide a +uniquely compelling case for the addition of a dedicated infix +operator: + +* Just as for the existing numerical operators, there exists a vast + body of prior art supporting the use of infix notation for matrix + multiplication across all fields of mathematics, science, and + engineering; ``@`` harmoniously fills a hole in Python's existing + operator system. + +* ``@`` greatly clarifies real-world code. + +* ``@`` provides a smoother onramp for less experienced users, who are + particularly harmed by hard-to-read code and API fragmentation. + +* ``@`` benefits a substantial and growing portion of the Python user + community. + +* ``@`` will be used frequently -- in fact, evidence suggests it may + be used more frequently than ``//`` or the bitwise operators. + +* ``@`` allows the Python numerical community to reduce fragmentation, + and finally standardize on a single consensus duck type for all + numerical array objects. + + +Background: What's wrong with the status quo?
+--------------------------------------------- + +When we crunch numbers on a computer, we usually have lots and lots of +numbers to deal with. Trying to deal with them one at a time is +cumbersome and slow -- especially when using an interpreted language. +Instead, we want the ability to write down simple operations that +apply to large collections of numbers all at once. The *n-dimensional +array* is the basic object that all popular numeric computing +environments use to make this possible. Python has several libraries +that provide such arrays, with numpy being at present the most +prominent. + +When working with n-dimensional arrays, there are two different ways +we might want to define multiplication. One is elementwise +multiplication::
+
+    [[1, 2],     [[11, 12],     [[1 * 11, 2 * 12],
+     [3, 4]]  x   [13, 14]]  =   [3 * 13, 4 * 14]]
+
+and the other is `matrix multiplication`_: + +.. _matrix multiplication: https://en.wikipedia.org/wiki/Matrix_multiplication + +::
+
+    [[1, 2],     [[11, 12],     [[1 * 11 + 2 * 13, 1 * 12 + 2 * 14],
+     [3, 4]]  x   [13, 14]]  =   [3 * 11 + 4 * 13, 3 * 12 + 4 * 14]]
+
+Elementwise multiplication is useful because it lets us easily and +quickly perform many multiplications on a large collection of values, +without writing a slow and cumbersome ``for`` loop. And this works as +part of a very general schema: when using the array objects provided +by numpy or other numerical libraries, all Python operators work +elementwise on arrays of all dimensionalities. The result is that one +can write functions using straightforward code like ``a * b + c / d``, +treating the variables as if they were simple values, but then +immediately use this function to efficiently perform this calculation +on large collections of values, while keeping them organized using +whatever arbitrarily complex array layout works best for the problem +at hand. + +Matrix multiplication is more of a special case. It's only defined on +2d arrays (also known as "matrices"), and multiplication is the only +operation that has an important "matrix" version -- "matrix addition" +is the same as elementwise addition; there is no such thing as "matrix +bitwise-or" or "matrix floordiv"; "matrix division" and "matrix +to-the-power-of" can be defined but are not very useful, etc. +However, matrix multiplication is still used very heavily across all +numerical application areas; mathematically, it's one of the most +fundamental operations there is. + +Because Python syntax currently allows for only a single +multiplication operator ``*``, libraries providing array-like objects +must decide: either use ``*`` for elementwise multiplication, or use +``*`` for matrix multiplication. And, unfortunately, it turns out +that when doing general-purpose number crunching, both operations are +used frequently, and there are major advantages to using infix rather +than function call syntax in both cases. Thus it is not at all clear +which convention is optimal, or even acceptable; often it varies on a +case-by-case basis. + +Nonetheless, network effects mean that it is very important that we +pick *just one* convention. In numpy, for example, it is technically +possible to switch between the conventions, because numpy provides two +different types with different ``__mul__`` methods. For +``numpy.ndarray`` objects, ``*`` performs elementwise multiplication, +and matrix multiplication must use a function call (``numpy.dot``).
For ``numpy.matrix`` objects, ``*`` performs matrix multiplication, +and elementwise multiplication requires function syntax. Writing code +using ``numpy.ndarray`` works fine. Writing code using +``numpy.matrix`` also works fine. But trouble begins as soon as we +try to integrate these two pieces of code together. Code that expects +an ``ndarray`` and gets a ``matrix``, or vice-versa, may crash or +return incorrect results. Keeping track of which functions expect +which types as inputs, and return which types as outputs, and then +converting back and forth all the time, is incredibly cumbersome and +impossible to get right at any scale. Functions that defensively try +to handle both types as input and DTRT find themselves floundering +into a swamp of ``isinstance`` and ``if`` statements. + +PEP 238 split ``/`` into two operators: ``/`` and ``//``. Imagine the +chaos that would have resulted if it had instead split ``int`` into +two types: ``classic_int``, whose ``__div__`` implemented floor +division, and ``new_int``, whose ``__div__`` implemented true +division. This, in a more limited way, is the situation that Python +number-crunchers currently find themselves in. + +In practice, the vast majority of projects have settled on the +convention of using ``*`` for elementwise multiplication, and function +call syntax for matrix multiplication (e.g., using ``numpy.ndarray`` +instead of ``numpy.matrix``). This reduces the problems caused by API +fragmentation, but it doesn't eliminate them. The strong desire to +use infix notation for matrix multiplication has caused a number of +specialized array libraries to continue to use the opposing convention +(e.g., scipy.sparse, pyoperators, pyviennacl) despite the problems +this causes, and ``numpy.matrix`` itself still gets used in +introductory programming courses, often appears in StackOverflow +answers, and so forth. Well-written libraries thus must continue to +be prepared to deal with both types of objects, and, of course, are +also stuck using unpleasant funcall syntax for matrix multiplication. +After nearly two decades of trying, the numerical community has still +not found any way to resolve these problems within the constraints of +current Python syntax (see `Rejected alternatives to adding a new +operator`_ below). + +This PEP proposes the minimum effective change to Python syntax that +will allow us to drain this swamp. It splits ``*`` into two +operators, just as was done for ``/``: ``*`` for elementwise +multiplication, and ``@`` for matrix multiplication. (Why not the +reverse? Because this way is compatible with the existing consensus, +and because it gives us a consistent rule that all the built-in +numeric operators also apply in an elementwise manner to arrays; the +reverse convention would lead to more special cases.) + +So that's why matrix multiplication doesn't and can't just use ``*``. +Now, in the rest of this section, we'll explain why it nonetheless +meets the high bar for adding a new operator. + + +Why should matrix multiplication be infix? +------------------------------------------ + +Right now, most numerical code in Python uses syntax like +``numpy.dot(a, b)`` or ``a.dot(b)`` to perform matrix multiplication. +This obviously works, so why do people make such a fuss about it, even +to the point of creating API fragmentation and compatibility swamps?
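To make the two-convention problem above concrete, here is a minimal sketch of the conventions side by side (both types are real numpy types; the results are shown in comments)::

    import numpy as np

    a = np.array([[1, 2], [3, 4]])     # ndarray convention
    m = np.matrix([[1, 2], [3, 4]])    # matrix convention

    a * a              # elementwise: [[ 1,  4], [ 9, 16]]
    np.dot(a, a)       # matrix product: [[ 7, 10], [15, 22]]

    m * m              # matrix product: [[ 7, 10], [15, 22]]
    np.multiply(m, m)  # elementwise requires a function call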
+ +Matrix multiplication shares two features with ordinary arithmetic +operations like addition and multiplication on numbers: (a) it is used +very heavily in numerical programs -- often multiple times per line of +code -- and (b) it has an ancient and universally adopted tradition of +being written using infix syntax. This is because, for typical +formulas, this notation is dramatically more readable than any +function call syntax. Here's an example to demonstrate: + +One of the most useful tools for testing a statistical hypothesis is +the linear hypothesis test for OLS regression models. It doesn't +really matter what all those words I just said mean; if we find +ourselves having to implement this thing, what we'll do is look up +some textbook or paper on it, and encounter many mathematical formulas +that look like: + +.. math::
+
+   S = (H \beta - r)^T (H V H^T)^{-1} (H \beta - r)
+
+Here the various variables are all vectors or matrices (details for +the curious: [#lht]_). + +Now we need to write code to perform this calculation. In current +numpy, matrix multiplication can be performed using either the +function or method call syntax. Neither provides a particularly +readable translation of the formula::
+
+    import numpy as np
+    from numpy.linalg import inv, solve
+
+    # Using dot function:
+    S = np.dot((np.dot(H, beta) - r).T,
+               np.dot(inv(np.dot(np.dot(H, V), H.T)), np.dot(H, beta) - r))
+
+    # Using dot method:
+    S = (H.dot(beta) - r).T.dot(inv(H.dot(V).dot(H.T))).dot(H.dot(beta) - r)
+
+With the ``@`` operator, the direct translation of the above formula +becomes::
+
+    S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)
+
+Notice that there is now a transparent, 1-to-1 mapping between the +symbols in the original formula and the code that implements it. + +Of course, an experienced programmer will probably notice that this is +not the best way to compute this expression. The repeated computation +of :math:`H \beta - r` should perhaps be factored out; and, +expressions of the form ``dot(inv(A), B)`` should almost always be +replaced by the more numerically stable ``solve(A, B)``. When using +``@``, performing these two refactorings gives us::
+
+    # Version 1 (as above)
+    S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)
+
+    # Version 2
+    trans_coef = H @ beta - r
+    S = trans_coef.T @ inv(H @ V @ H.T) @ trans_coef
+
+    # Version 3
+    S = trans_coef.T @ solve(H @ V @ H.T, trans_coef)
+
+Notice that when comparing between each pair of steps, it's very easy +to see exactly what was changed. If we apply the equivalent +transformations to the code using the .dot method, then the changes +are much harder to read out or verify for correctness::
+
+    # Version 1 (as above)
+    S = (H.dot(beta) - r).T.dot(inv(H.dot(V).dot(H.T))).dot(H.dot(beta) - r)
+
+    # Version 2
+    trans_coef = H.dot(beta) - r
+    S = trans_coef.T.dot(inv(H.dot(V).dot(H.T))).dot(trans_coef)
+
+    # Version 3
+    S = trans_coef.T.dot(solve(H.dot(V).dot(H.T)), trans_coef)
+
+Readability counts! The statements using ``@`` are shorter, contain +more whitespace, can be directly and easily compared both to each +other and to the textbook formula, and contain only meaningful +parentheses. This last point is particularly important for +readability: when using function-call syntax, the required parentheses +on every operation create visual clutter that makes it very difficult +to parse out the overall structure of the formula by eye, even for a +relatively simple formula like this one.
Eyes are terrible at parsing +non-regular languages. I made and caught many errors while trying to +write out the 'dot' formulas above. I know they still contain at +least one error, maybe more. (Exercise: find it. Or them.) The +``@`` examples, by contrast, are not only correct, they're obviously +correct at a glance. + +If we are even more sophisticated programmers, and writing code that +we expect to be reused, then considerations of speed or numerical +accuracy might lead us to prefer some particular order of evaluation. +Because ``@`` makes it possible to omit irrelevant parentheses, we can +be certain that if we *do* write something like ``(H @ V) @ H.T``, +then our readers will know that the parentheses must have been added +intentionally to accomplish some meaningful purpose. In the ``dot`` +examples, it's impossible to know which nesting decisions are +important, and which are arbitrary. + +Infix ``@`` dramatically improves matrix code usability at all stages +of programmer interaction. + + +Transparent syntax is especially crucial for non-expert programmers +------------------------------------------------------------------- + +A large proportion of scientific code is written by people who are +experts in their domain, but are not experts in programming. And +there are many university courses run each year with titles like "Data +analysis for social scientists" which assume no programming +background, and teach some combination of mathematical techniques, +introduction to programming, and the use of programming to implement +these mathematical techniques, all within a 10-15 week period. These +courses are more and more often being taught in Python rather than +special-purpose languages like R or Matlab. + +For these kinds of users, whose programming knowledge is fragile, the +existence of a transparent mapping between formulas and code often +means the difference between succeeding and failing to write that code +at all. This is so important that such classes often use the +``numpy.matrix`` type which defines ``*`` to mean matrix +multiplication, even though this type is buggy and heavily +disrecommended by the rest of the numpy community for the +fragmentation that it causes. This pedagogical use case is, in fact, +the *only* reason ``numpy.matrix`` remains a supported part of numpy. +Adding ``@`` will benefit both beginning and advanced users with +better syntax; and furthermore, it will allow both groups to +standardize on the same notation from the start, providing a smoother +on-ramp to expertise. + + +But isn't matrix multiplication a pretty niche requirement? +----------------------------------------------------------- + +The world is full of continuous data, and computers are increasingly +called upon to work with it in sophisticated ways. Arrays are the +lingua franca of finance, machine learning, 3d graphics, computer +vision, robotics, operations research, econometrics, meteorology, +computational linguistics, recommendation systems, neuroscience, +astronomy, bioinformatics (including genetics, cancer research, drug +discovery, etc.), physics engines, quantum mechanics, geophysics, +network analysis, and many other application areas. In most or all of +these areas, Python is rapidly becoming a dominant player, in large +part because of its ability to elegantly mix traditional discrete data +structures (hash tables, strings, etc.) on an equal footing with +modern numerical data types and algorithms. 
+ +We all live in our own little sub-communities, so some Python users +may be surprised to realize the sheer extent to which Python is used +for number crunching -- especially since much of this particular +sub-community's activity occurs outside of traditional Python/FOSS +channels. So, to give some rough idea of just how many numerical +Python programmers are actually out there, here are two numbers: In +2013, there were 7 international conferences organized specifically on +numerical Python [#scipy-conf]_ [#pydata-conf]_. At PyCon 2014, ~20% +of the tutorials appear to involve the use of matrices +[#pycon-tutorials]_. + +To quantify this further, we used Github's "search" function to look +at what modules are actually imported across a wide range of +real-world code (i.e., all the code on Github). We checked for +imports of several popular stdlib modules, a variety of numerically +oriented modules, and various other extremely high-profile modules +like django and lxml (the latter of which is the #1 most downloaded +package on PyPI). Starred lines indicate packages which export array- +or matrix-like objects which will adopt ``@`` if this PEP is +approved::
+
+    Count of Python source files on Github matching given search terms
+                   (as of 2014-04-10, ~21:00 UTC)
+    ================ ========== =============== ======= ===========
+    module           "import X" "from X import"   total total/numpy
+    ================ ========== =============== ======= ===========
+    sys                 2374638           63301 2437939        5.85
+    os                  1971515           37571 2009086        4.82
+    re                  1294651            8358 1303009        3.12
+    numpy ************** 337916 ********** 79065 * 416981 ******* 1.00
+    warnings             298195           73150  371345        0.89
+    subprocess           281290           63644  344934        0.83
+    django                62795          219302  282097        0.68
+    math                 200084           81903  281987        0.68
+    threading            212302           45423  257725        0.62
+    pickle+cPickle       215349           22672  238021        0.57
+    matplotlib           119054           27859  146913        0.35
+    sqlalchemy            29842           82850  112692        0.27
+    pylab *************** 36754 ********** 41063 ** 77817 ******* 0.19
+    scipy *************** 40829 ********** 28263 ** 69092 ******* 0.17
+    lxml                  19026           38061   57087        0.14
+    zlib                  40486            6623   47109        0.11
+    multiprocessing       25247           19850   45097        0.11
+    requests              30896             560   31456        0.08
+    jinja2                 8057           24047   32104        0.08
+    twisted               13858            6404   20262        0.05
+    gevent                11309            8529   19838        0.05
+    pandas ************** 14923 *********** 4005 ** 18928 ******* 0.05
+    sympy                  2779            9537   12316        0.03
+    theano *************** 3654 *********** 1828 *** 5482 ******* 0.01
+    ================ ========== =============== ======= ===========
+
+These numbers should be taken with several grains of salt (see +footnote for discussion: [#github-details]_), but, to the extent they +can be trusted, they suggest that ``numpy`` might be the single +most-imported non-stdlib module in the entire Pythonverse; it's even +more-imported than such stdlib stalwarts as ``subprocess``, ``math``, +``pickle``, and ``threading``. And numpy users represent only a +subset of the broader numerical community that will benefit from the +``@`` operator. Matrices may once have been a niche data type +restricted to Fortran programs running in university labs and military +clusters, but those days are long gone. Number crunching is a +mainstream part of modern Python usage. + +In addition, there is some precedent for adding an infix operator to +handle a more-specialized arithmetic operation: the floor division +operator ``//``, like the bitwise operators, is very useful under +certain circumstances when performing exact calculations on discrete +values.
But it seems likely that there are many Python programmers +who have never had reason to use ``//`` (or, for that matter, the +bitwise operators). ``@`` is no more niche than ``//``. + + +So ``@`` is good for matrix formulas, but how common are those really? +---------------------------------------------------------------------- + +We've seen that ``@`` makes matrix formulas dramatically easier to +work with for both experts and non-experts, that matrix formulas +appear in many important applications, and that numerical libraries +like numpy are used by a substantial proportion of Python's user base. +But numerical libraries aren't just about matrix formulas, and being +important doesn't necessarily mean taking up a lot of code: if matrix +formulas only occurred in one or two places in the average +numerically-oriented project, then it still wouldn't be worth adding a +new operator. So how common is matrix multiplication, really? + +When the going gets tough, the tough get empirical. To get a rough +estimate of how useful the ``@`` operator will be, the table below +shows the rate at which different Python operators are actually used +in the stdlib, and also in two high-profile numerical packages -- the +scikit-learn machine learning library, and the nipy neuroimaging +library -- normalized by source lines of code (SLOC). Rows are sorted +by the 'combined' column, which pools all three code bases together. +The combined column is thus strongly weighted towards the stdlib, +which is much larger than both projects put together (stdlib: 411575 +SLOC, scikit-learn: 50924 SLOC, nipy: 37078 SLOC). [#sloc-details]_ + +The ``dot`` row (marked ``******``) counts how common matrix multiply +operations are in each codebase. + +::
+
+    ==== ====== ============ ==== ========
+     op  stdlib scikit-learn nipy combined
+    ==== ====== ============ ==== ========
+     =     2969         5536 4932     3376 / 10,000 SLOC
+     -      218          444  496      261
+     +      224          201  348      231
+     ==     177          248  334      196
+     *      156          284  465      192
+     %      121          114  107      119
+     **      59          111  118       68
+     !=      40           56   74       44
+     /       18          121  183       41
+     >       29           70  110       39
+     +=      34           61   67       39
+     <       32           62   76       38
+     >=      19           17   17       18
+     <=      18           27   12       18
+     dot ***** 0 ********** 99 ** 74 ****** 16
+     |       18            1    2       15
+     &       14            0    6       12
+     <<      10            1    1        8
+     //       9            9    1        8
+     -=       5           21   14        8
+     *=       2           19   22        5
+     /=       0           23   16        4
+     >>       4            0    0        3
+     ^        3            0    0        3
+     ~        2            4    5        2
+     |=       3            0    0        2
+     &=       1            0    0        1
+     //=      1            0    0        1
+     ^=       1            0    0        0
+     **=      0            2    0        0
+     %=       0            0    0        0
+     <<=      0            0    0        0
+     >>=      0            0    0        0
+    ==== ====== ============ ==== ========
+
+These two numerical packages alone contain ~780 uses of matrix +multiplication. Within these packages, matrix multiplication is used +more heavily than most comparison operators (``<`` ``!=`` ``<=`` +``>=``). Even when we dilute these counts by including the stdlib +into our comparisons, matrix multiplication is still used more often +in total than any of the bitwise operators, and 2x as often as ``//``. +This is true even though the stdlib, which contains a fair amount of +integer arithmetic and no matrix operations, makes up more than 80% of +the combined code base. + +By coincidence, the numeric libraries make up approximately the same +proportion of the 'combined' codebase as numeric tutorials make up of +PyCon 2014's tutorial schedule, which suggests that the 'combined' +column may not be *wildly* unrepresentative of new Python code in +general.
While it's impossible to know for certain, from this data it +seems entirely possible that across all Python code currently being +written, matrix multiplication is already used more often than ``//`` +and the bitwise operations. + + +But isn't it weird to add an operator with no stdlib uses? +---------------------------------------------------------- + +It's certainly unusual (though extended slicing existed for some time +before builtin types gained support for it, ``Ellipsis`` is still unused +within the stdlib, etc.). But the important thing is whether a change +will benefit users, not where the software is being downloaded from. +It's clear from the above that ``@`` will be used, and used heavily. +And this PEP provides the critical piece that will allow the Python +numerical community to finally reach consensus on a standard duck type +for all array-like objects, which is a necessary precondition to ever +adding a numerical array type to the stdlib. + + +Compatibility considerations +============================ + +Currently, the only legal use of the ``@`` token in Python code is at +statement beginning in decorators. The new operators are both infix; +the one place they can never occur is at statement beginning. +Therefore, no existing code will be broken by the addition of these +operators, and there is no possible parsing ambiguity between +decorator-@ and the new operators. + +Another important kind of compatibility is the mental cost paid by +users to update their understanding of the Python language after this +change, particularly for users who do not work with matrices and thus +do not benefit. Here again, ``@`` has minimal impact: even +comprehensive tutorials and references will only need to add a +sentence or two to fully document this PEP's changes for a +non-numerical audience. + + +Intended usage details +====================== + +This section is informative, rather than normative -- it documents the +consensus of a number of libraries that provide array- or matrix-like +objects on how ``@`` will be implemented. + +This section uses the numpy terminology for describing arbitrary +multidimensional arrays of data, because it is a superset of all other +commonly used models. In this model, the *shape* of any array is +represented by a tuple of integers. Because matrices are +two-dimensional, they have len(shape) == 2, while 1d vectors have +len(shape) == 1, and scalars have shape == (), i.e., they are "0 +dimensional". Any array contains prod(shape) total entries. Notice +that `prod(()) == 1`_ (for the same reason that sum(()) == 0); scalars +are just an ordinary kind of array, not a special case. Notice also +that we distinguish between a single scalar value (shape == (), +analogous to ``1``), a vector containing only a single entry (shape == +(1,), analogous to ``[1]``), a matrix containing only a single entry +(shape == (1, 1), analogous to ``[[1]]``), etc., so the dimensionality +of any array is always well-defined. Other libraries with more +restricted representations (e.g., those that support 2d arrays only) +might implement only a subset of the functionality described here. + +.. _prod(()) == 1: https://en.wikipedia.org/wiki/Empty_product + +Semantics +--------- + +The recommended semantics for ``@`` for different inputs are: + +* 2d inputs are conventional matrices, and so the semantics are + obvious: we apply conventional matrix multiplication.
If we write + ``arr(2, 3)`` to represent an arbitrary 2x3 array, then ``arr(2, 3) + @ arr(3, 4)`` returns an array with shape (2, 4). + +* 1d vector inputs are promoted to 2d by prepending or appending a '1' + to the shape, the operation is performed, and then the added + dimension is removed from the output. The 1 is always added on the + "outside" of the shape: prepended for left arguments, and appended + for right arguments. The result is that matrix @ vector and vector + @ matrix are both legal (assuming compatible shapes), and both + return 1d vectors; vector @ vector returns a scalar. This is + clearer with examples. + + * ``arr(2, 3) @ arr(3, 1)`` is a regular matrix product, and returns + an array with shape (2, 1), i.e., a column vector. + + * ``arr(2, 3) @ arr(3)`` performs the same computation as the + previous (i.e., treats the 1d vector as a matrix containing a + single *column*, shape = (3, 1)), but returns the result with + shape (2,), i.e., a 1d vector. + + * ``arr(1, 3) @ arr(3, 2)`` is a regular matrix product, and returns + an array with shape (1, 2), i.e., a row vector. + + * ``arr(3) @ arr(3, 2)`` performs the same computation as the + previous (i.e., treats the 1d vector as a matrix containing a + single *row*, shape = (1, 3)), but returns the result with shape + (2,), i.e., a 1d vector. + + * ``arr(1, 3) @ arr(3, 1)`` is a regular matrix product, and returns + an array with shape (1, 1), i.e., a single value in matrix form. + + * ``arr(3) @ arr(3)`` performs the same computation as the + previous, but returns the result with shape (), i.e., a single + scalar value, not in matrix form. So this is the standard inner + product on vectors. + + An infelicity of this definition for 1d vectors is that it makes + ``@`` non-associative in some cases (``(Mat1 @ vec) @ Mat2`` != + ``Mat1 @ (vec @ Mat2)``). But this seems to be a case where + practicality beats purity: non-associativity only arises for strange + expressions that would never be written in practice; if they are + written anyway then there is a consistent rule for understanding + what will happen (``Mat1 @ vec @ Mat2`` is parsed as ``(Mat1 @ vec) + @ Mat2``, just like ``a - b - c``); and, not supporting 1d vectors + would rule out many important use cases that do arise very commonly + in practice. No-one wants to explain to new users why, to solve the + simplest linear system in the obvious way, they have to type + ``(inv(A) @ b[:, np.newaxis]).flatten()`` instead of ``inv(A) @ b``, + or perform an ordinary least-squares regression by typing + ``solve(X.T @ X, X @ y[:, np.newaxis]).flatten()`` instead of + ``solve(X.T @ X, X @ y)``. No-one wants to type ``(a[np.newaxis, :] + @ b[:, np.newaxis])[0, 0]`` instead of ``a @ b`` every time they + compute an inner product, or ``(a[np.newaxis, :] @ Mat @ b[:, + np.newaxis])[0, 0]`` for general quadratic forms instead of ``a @ + Mat @ b``. In addition, sage and sympy (see below) use these + non-associative semantics with an infix matrix multiplication + operator (they use ``*``), and they report that they haven't + experienced any problems caused by it. + +* For inputs with more than 2 dimensions, we treat the last two + dimensions as being the dimensions of the matrices to multiply, and + 'broadcast' across the other dimensions. This provides a convenient + way to quickly compute many matrix products in a single operation.
+ For example, ``arr(10, 2, 3) @ arr(10, 3, 4)`` performs 10 separate + matrix multiplies, each of which multiplies a 2x3 and a 3x4 matrix + to produce a 2x4 matrix, and then returns the 10 resulting matrices + together in an array with shape (10, 2, 4). The intuition here is + that we treat these 3d arrays of numbers as if they were 1d arrays + *of matrices*, and then apply matrix multiplication in an + elementwise manner, where now each 'element' is a whole matrix. + Note that broadcasting is not limited to perfectly aligned arrays; + in more complicated cases, it allows several simple but powerful + tricks for controlling how arrays are aligned with each other; see + [#broadcasting]_ for details. (In particular, it turns out that + when broadcasting is taken into account, the standard scalar * + matrix product is a special case of the elementwise multiplication + operator ``*``.) + + If one operand is >2d, and another operand is 1d, then the above + rules apply unchanged, with 1d->2d promotion performed before + broadcasting. E.g., ``arr(10, 2, 3) @ arr(3)`` first promotes to + ``arr(10, 2, 3) @ arr(3, 1)``, then broadcasts the right argument to + create the aligned operation ``arr(10, 2, 3) @ arr(10, 3, 1)``, + multiplies to get an array with shape (10, 2, 1), and finally + removes the added dimension, returning an array with shape (10, 2). + Similarly, ``arr(2) @ arr(10, 2, 3)`` produces an intermediate array + with shape (10, 1, 3), and a final array with shape (10, 3). + +* 0d (scalar) inputs raise an error. Scalar * matrix multiplication + is a mathematically and algorithmically distinct operation from + matrix @ matrix multiplication, and is already covered by the + elementwise ``*`` operator. Allowing scalar @ matrix would thus + both require an unnecessary special case, and violate TOOWTDI. + + +Adoption +-------- + +We group existing Python projects which provide array- or matrix-like +types based on what API they currently use for elementwise and matrix +multiplication. + +**Projects which currently use * for elementwise multiplication, and +function/method calls for matrix multiplication:** + +The developers of the following projects have expressed an intention +to implement ``@`` on their array-like types using the above +semantics: + +* numpy +* pandas +* blaze +* theano + +The following projects have been alerted to the existence of the PEP, +but it's not yet known what they plan to do if it's accepted. 
We +don't anticipate that they'll have any objections, though, since +everything proposed here is consistent with how they already do +things: + +* pycuda +* panda3d + +**Projects which currently use * for matrix multiplication, and +function/method calls for elementwise multiplication:** + +The following projects have expressed an intention, if this PEP is +accepted, to migrate from their current API to the elementwise-``*``, +matmul-``@`` convention (i.e., this is a list of projects whose API +fragmentation will probably be eliminated if this PEP is accepted): + +* numpy (``numpy.matrix``) +* scipy.sparse +* pyoperators +* pyviennacl + +The following projects have been alerted to the existence of the PEP, +but it's not known what they plan to do if it's accepted (i.e., this +is a list of projects whose API fragmentation may or may not be +eliminated if this PEP is accepted): + +* cvxopt + +**Projects which currently use * for matrix multiplication, and which +don't really care about elementwise multiplication of matrices:** + +There are several projects which implement matrix types, but from a +very different perspective than the numerical libraries discussed +above. These projects focus on computational methods for analyzing +matrices in the sense of abstract mathematical objects (i.e., linear +maps over free modules over rings), rather than as big bags full of +numbers that need crunching. And it turns out that from the abstract +math point of view, there isn't much use for elementwise operations in +the first place; as discussed in the Background section above, +elementwise operations are motivated by the bag-of-numbers approach. +So these projects don't encounter the basic problem that this PEP +exists to address, making it mostly irrelevant to them; while they +appear superficially similar to projects like numpy, they're actually +doing something quite different. They use ``*`` for matrix +multiplication (and for group actions, and so forth), and if this PEP +is accepted, their expressed intention is to continue doing so, while +perhaps adding ``@`` as an alias. These projects include: + +* sympy +* sage + + +Implementation details +====================== + +New functions ``operator.matmul`` and ``operator.__matmul__`` are +added to the standard library, with the usual semantics. + +A corresponding function ``PyObject* PyObject_MatrixMultiply(PyObject +*o1, PyObject *o2)`` is added to the C API. + +A new AST node is added named ``MatMult``, along with a new token +``ATEQUAL`` and new bytecode opcodes ``BINARY_MATRIX_MULTIPLY`` and +``INPLACE_MATRIX_MULTIPLY``. + +Two new type slots are added; whether this is to ``PyNumberMethods`` +or a new ``PyMatrixMethods`` struct remains to be determined. + + +Rationale for specification details +=================================== + +Choice of operator +------------------ + +Why ``@`` instead of some other spelling? There isn't any consensus +across other programming languages about how this operator should be +named [#matmul-other-langs]_; here we discuss the various options. + +Restricting ourselves only to symbols present on US English keyboards, +the punctuation characters that don't already have a meaning in Python +expression context are: ``@``, backtick, ``$``, ``!``, and ``?``.
Of +these options, ``@`` is clearly the best; ``!`` and ``?`` are already +heavily freighted with inapplicable meanings in the programming +context, backtick has been banned from Python by BDFL pronouncement +(see PEP 3099), and ``$`` is uglier, even more dissimilar to ``*`` and +:math:`\cdot`, and has Perl/PHP baggage. ``$`` is probably the +second-best option of these, though. + +Symbols which are not present on US English keyboards start at a +significant disadvantage (having to spend 5 minutes at the beginning +of every numeric Python tutorial just going over keyboard layouts is +not a hassle anyone really wants). Plus, even if we somehow overcame +the typing problem, it's not clear there are any that are actually +better than ``@``. Some options that have been suggested include: + +* U+00D7 MULTIPLICATION SIGN: ``A × B`` +* U+22C5 DOT OPERATOR: ``A ⋅ B`` +* U+2297 CIRCLED TIMES: ``A ⊗ B`` +* U+00B0 DEGREE: ``A ° B`` + +What we need, though, is an operator that means "matrix +multiplication, as opposed to scalar/elementwise multiplication". +There is no conventional symbol with this meaning in either +programming or mathematics, where these operations are usually +distinguished by context. (And U+2297 CIRCLED TIMES is actually used +conventionally to mean exactly the wrong things: elementwise +multiplication -- the "Hadamard product" -- or outer product, rather +than matrix/inner product like our operator). ``@`` at least has the +virtue that it *looks* like a funny non-commutative operator; a naive +user who knows maths but not programming couldn't look at ``A * B`` +versus ``A × B``, or ``A * B`` versus ``A ⋅ B``, or ``A * B`` versus +``A ° B`` and guess which one is the usual multiplication, and which +one is the special case. + +Finally, there is the option of using multi-character tokens. Some +options: + +* Matlab and Julia use a ``.*`` operator. Aside from being visually + confusable with ``*``, this would be a terrible choice for us + because in Matlab and Julia, ``*`` means matrix multiplication and + ``.*`` means elementwise multiplication, so using ``.*`` for matrix + multiplication would make us exactly backwards from what Matlab and + Julia users expect. + +* APL apparently used ``+.×``, which by combining a multi-character + token, confusing attribute-access-like . syntax, and a unicode + character, ranks somewhere below U+2603 SNOWMAN on our candidate + list. If we like the idea of combining addition and multiplication + operators as being evocative of how matrix multiplication actually + works, then something like ``+*`` could be used -- though this may + be too easy to confuse with ``*+``, which is just multiplication + combined with the unary ``+`` operator. + +* PEP 211 suggested ``~*``. This has the downside that it sort of + suggests that there is a unary ``*`` operator that is being combined + with unary ``~``, but it could work. + +* R uses ``%*%`` for matrix multiplication. In R this forms part of a + general extensible infix system in which all tokens of the form + ``%foo%`` are user-defined binary operators. We could steal the + token without stealing the system. + +* Some other plausible candidates that have been suggested: ``><`` (= + ascii drawing of the multiplication sign ×); the footnote operator + ``[*]`` or ``|*|`` (but when used in context, the use of vertical + grouping symbols tends to recreate the nested parentheses visual + clutter that was noted as one of the major downsides of the function + syntax we're trying to get away from); ``^*``. 
+ +So, it doesn't matter much, but ``@`` seems as good as or better than +any of the alternatives: + +* It's a friendly character that Pythoneers are already used to typing + in decorators, but the decorator usage and the math expression + usage are sufficiently dissimilar that it would be hard to confuse + them in practice. + +* It's widely accessible across keyboard layouts (and thanks to its + use in email addresses, this is true even of weird keyboards like + those in phones). + +* It's round like ``*`` and :math:`\cdot`. + +* The mATrices mnemonic is cute. + +* The swirly shape is reminiscent of the simultaneous sweeps over rows + and columns that define matrix multiplication. + +* Its asymmetry is evocative of its non-commutative nature. + +* Whatever, we have to pick something. + + +Precedence and associativity +---------------------------- + +There was a long discussion [#associativity-discussions]_ about +whether ``@`` should be right- or left-associative (or even something +more exotic [#group-associativity]_). Almost all Python operators are +left-associative, so following this convention would be the simplest +approach, but there were two arguments that suggested matrix +multiplication might be worth making right-associative as a special +case: + +First, matrix multiplication has a tight conceptual association with +function application/composition, so many mathematically sophisticated +users have an intuition that an expression like :math:`R S x` proceeds +from right-to-left, with first :math:`S` transforming the vector +:math:`x`, and then :math:`R` transforming the result. This isn't +universally agreed (and not all number-crunchers are steeped in the +pure-math conceptual framework that motivates this intuition +[#oil-industry-versus-right-associativity]_), but at the least this +intuition is more common than for other operations like :math:`2 \cdot +3 \cdot 4` which everyone reads as going from left-to-right. + +Second, if expressions like ``Mat @ Mat @ vec`` appear often in code, +then programs will run faster (and efficiency-minded programmers will +be able to use fewer parentheses) if this is evaluated as ``Mat @ (Mat +@ vec)`` than if it is evaluated like ``(Mat @ Mat) @ vec``. + +However, weighing against these arguments are the following: + +Regarding the efficiency argument, empirically, we were unable to find +any evidence that ``Mat @ Mat @ vec`` type expressions actually +dominate in real-life code. Parsing a number of large projects that +use numpy, we found that when forced by numpy's current funcall syntax +to choose an order of operations for nested calls to ``dot``, people +actually use left-associative nesting slightly *more* often than +right-associative nesting [#numpy-associativity-counts]_. And anyway, +writing parentheses isn't so bad -- if an efficiency-minded programmer +is going to take the trouble to think through the best way to evaluate +some expression, they probably *should* write down the parentheses +regardless of whether they're needed, just to make it obvious to the +next reader that the order of operations matters. + +In addition, it turns out that other languages, including those with +much more of a focus on linear algebra, overwhelmingly make their +matmul operators left-associative. Specifically, the ``@`` equivalent +is left-associative in R, Matlab, Julia, IDL, and Gauss.
The only +exceptions we found are Mathematica, in which ``a @ b @ c`` would be +parsed non-associatively as ``dot(a, b, c)``, and APL, in which all +operators are right-associative. There do not seem to exist any +languages that make ``@`` right-associative and ``*`` +left-associative. And these decisions don't seem to be controversial +-- I've never seen anyone complaining about this particular aspect of +any of these other languages, and the left-associativity of ``*`` +doesn't seem to bother users of the existing Python libraries that use +``*`` for matrix multiplication. So, at the least we can conclude from +this that making ``@`` left-associative will certainly not cause any +disasters. Making ``@`` right-associative, OTOH, would be exploring +new and uncertain ground. + +And another advantage of left-associativity is that it is much easier +to learn and remember that ``@`` acts like ``*``, than it is to +remember first that ``@`` is unlike other Python operators by being +right-associative, and then on top of this, also have to remember +whether it is more tightly or more loosely binding than +``*``. (Right-associativity forces us to choose a precedence, and +intuitions were about equally split on which precedence made more +sense. So this suggests that no matter which choice we made, no-one +would be able to guess or remember it.) + +On net, therefore, the general consensus of the numerical community is +that while matrix multiplication is something of a special case, it's +not special enough to break the rules, and ``@`` should parse like +``*`` does. + + +(Non)-Definitions for built-in types +------------------------------------ + +No ``__matmul__`` or ``__matpow__`` are defined for builtin numeric +types (``float``, ``int``, etc.) or for the ``numbers.Number`` +hierarchy, because these types represent scalars, and the consensus +semantics for ``@`` are that it should raise an error on scalars. + +We do not -- for now -- define a ``__matmul__`` method on the standard +``memoryview`` or ``array.array`` objects, for several reasons. Of +course this could be added if someone wants it, but these types would +require quite a bit of additional work beyond ``__matmul__`` before +they could be used for numeric work -- e.g., they have no way to do +addition or scalar multiplication either! -- and adding such +functionality is beyond the scope of this PEP. In addition, providing +a quality implementation of matrix multiplication is highly +non-trivial. Naive nested loop implementations are very slow and +shipping such an implementation in CPython would just create a trap +for users. But the alternative -- providing a modern, competitive +matrix multiply -- would require that CPython link to a BLAS library, +which brings a set of new complications. In particular, several +popular BLAS libraries (including the one that ships by default on +OS X) currently break the use of ``multiprocessing`` [#blas-fork]_. +Together, these considerations mean that the cost/benefit of adding +``__matmul__`` to these types just isn't there, so for now we'll +continue to delegate these problems to numpy and friends, and defer a +more systematic solution to a future proposal. + +There are also non-numeric Python builtins which define ``__mul__`` +(``str``, ``list``, ...). We do not define ``__matmul__`` for these +types either, because why would we even do that. 
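To make the proposed protocol concrete, here is a minimal sketch of how a third-party type would hook into ``@`` (the ``Mat`` class and its list-of-rows storage are invented for illustration, it requires an interpreter that implements this PEP, and it uses exactly the kind of naive nested-loop multiply argued above to be inappropriate for CPython's builtin types)::

    class Mat:
        """Toy 2d matrix stored as a list of rows -- illustration only."""
        def __init__(self, rows):
            self.rows = rows

        def __matmul__(self, other):
            if not isinstance(other, Mat):
                return NotImplemented   # so e.g. Mat @ 3 raises TypeError
            # Naive nested-loop multiply: each entry is a row of self
            # dotted with a column of other.
            return Mat([[sum(x * y for x, y in zip(row, col))
                         for col in zip(*other.rows)]
                        for row in self.rows])

    A = Mat([[1, 2], [3, 4]])
    B = Mat([[11, 12], [13, 14]])
    print((A @ B).rows)   # [[37, 40], [85, 92]]

``__rmatmul__`` and ``__imatmul__`` would be defined analogously, mirroring the table in the Specification above.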
+ + +Non-definition of matrix power +------------------------------ + +Earlier versions of this PEP also proposed a matrix power operator, +``@@``, analogous to ``**``. But on further consideration, it was +decided that the utility of this was sufficiently unclear that it +would be better to leave it out for now, and only revisit the issue if +-- once we have more experience with ``@`` -- it turns out that ``@@`` +is truly missed. [#atat-discussion]_ + + +Rejected alternatives to adding a new operator +============================================== + +Over the past few decades, the Python numeric community has explored a +variety of ways to resolve the tension between matrix and elementwise +multiplication operations. PEP 211 and PEP 225, both proposed in 2000 +and last seriously discussed in 2008 [#threads-2008]_, were early +attempts to add new operators to solve this problem, but suffered from +serious flaws; in particular, at that time the Python numerical +community had not yet reached consensus on the proper API for array +objects, or on what operators might be needed or useful (e.g., PEP 225 +proposes 6 new operators with unspecified semantics). Experience +since then has now led to consensus that the best solution, for both +numeric Python and core Python, is to add a single infix operator for +matrix multiply (together with the other new operators this implies +like ``@=``). + +We review some of the rejected alternatives here. + +**Use a second type that defines __mul__ as matrix multiplication:** +As discussed above (`Background: What's wrong with the status quo?`_), +this has been tried for many years via the ``numpy.matrix`` type +(and its predecessors in Numeric and numarray). The result is a +strong consensus among both numpy developers and developers of +downstream packages that ``numpy.matrix`` should essentially never be +used, because of the problems caused by having conflicting duck types +for arrays. (Of course one could then argue we should *only* define +``__mul__`` to be matrix multiplication, but then we'd have the same +problem with elementwise multiplication.) There have been several +pushes to remove ``numpy.matrix`` entirely; the only counter-arguments +have come from educators who find that its problems are outweighed by +the need to provide a simple and clear mapping between mathematical +notation and code for novices (see `Transparent syntax is especially +crucial for non-expert programmers`_). But, of course, starting out +newbies with a dispreferred syntax and then expecting them to +transition later causes its own problems. The two-type solution is +worse than the disease. + +**Add lots of new operators, or add a new generic syntax for defining +infix operators:** In addition to being generally un-Pythonic and +repeatedly rejected by BDFL fiat, this would be using a sledgehammer +to smash a fly. The scientific python community has consensus that +adding one operator for matrix multiplication is enough to fix the one +otherwise unfixable pain point. (In retrospect, we all think PEP 225 +was a bad idea too -- or at least far more complex than it needed to +be.) + +**Add a new @ (or whatever) operator that has some other meaning in +general Python, and then overload it in numeric code:** This was the +approach taken by PEP 211, which proposed defining ``@`` to be the +equivalent of ``itertools.product``. The problem with this is that +when taken on its own terms, it's pretty clear that +``itertools.product`` doesn't actually need a dedicated operator.
It +hasn't even been deemed worthy of a builtin. (During discussions of +this PEP, a similar suggestion was made to define ``@`` as a general +purpose function composition operator, and this suffers from the same +problem; ``functools.compose`` isn't even useful enough to exist.) +Matrix multiplication has a uniquely strong rationale for inclusion as +an infix operator. There almost certainly don't exist any other +binary operations that will ever justify adding any other infix +operators to Python. + +**Add a .dot method to array types so as to allow "pseudo-infix" +A.dot(B) syntax:** This has been in numpy for some years, and in many +cases it's better than dot(A, B). But it's still much less readable +than real infix notation, and in particular still suffers from an +extreme overabundance of parentheses. See `Why should matrix +multiplication be infix?`_ above. + +**Use a 'with' block to toggle the meaning of * within a single code +block**: E.g., numpy could define a special context object so that +we'd have:: + + c = a * b # element-wise multiplication + with numpy.mul_as_dot: + c = a * b # matrix multiplication + +However, this has two serious problems: first, it requires that every +array-like type's ``__mul__`` method know how to check some global +state (``numpy.mul_is_currently_dot`` or whatever). This is fine if +``a`` and ``b`` are numpy objects, but the world contains many +non-numpy array-like objects. So this either requires non-local +coupling -- every numpy competitor library has to import numpy and +then check ``numpy.mul_is_currently_dot`` on every operation -- or +else it breaks duck-typing, with the above code doing radically +different things depending on whether ``a`` and ``b`` are numpy +objects or some other sort of object. Second, and worse, ``with`` +blocks are dynamically scoped, not lexically scoped; i.e., any +function that gets called inside the ``with`` block will suddenly find +itself executing inside the mul_as_dot world, and crash and burn +horribly -- if you're lucky. So this is a construct that could only +be used safely in rather limited cases (no function calls), and which +would make it very easy to shoot yourself in the foot without warning. + +**Use a language preprocessor that adds extra numerically-oriented +operators and perhaps other syntax:** (As per recent BDFL suggestion: +[#preprocessor]_) This suggestion seems based on the idea that +numerical code needs a wide variety of syntax additions. In fact, +given ``@``, most numerical users don't need any other operators or +syntax; it solves the one really painful problem that cannot be solved +by other means, and that causes painful reverberations through the +larger ecosystem. Defining a new language (presumably with its own +parser which would have to be kept in sync with Python's, etc.), just +to support a single binary operator, is neither practical nor +desirable. In the numerical context, Python's competition is +special-purpose numerical languages (Matlab, R, IDL, etc.). Compared +to these, Python's killer feature is exactly that one can mix +specialized numerical code with code for XML parsing, web page +generation, database access, network programming, GUI libraries, and +so forth, and we also gain major benefits from the huge variety of +tutorials, reference material, introductory classes, etc., which use +Python. Fragmenting "numerical Python" from "real Python" would be a +major source of confusion. A major motivation for this PEP is to +*reduce* fragmentation.
Having to set up a preprocessor would be an +especially prohibitive complication for unsophisticated users. And we +use Python because we like Python! We don't want +almost-but-not-quite-Python. + +**Use overloading hacks to define a "new infix operator" like *dot*, +as in a well-known Python recipe:** (See: [#infix-hack]_) Beautiful is +better than ugly. This is... not beautiful. And not Pythonic. And +especially unfriendly to beginners, who are just trying to wrap their +heads around the idea that there's a coherent underlying system behind +these magic incantations that they're learning, when along comes an +evil hack like this that violates that system, creates bizarre error +messages when accidentally misused, and whose underlying mechanisms +can't be understood without deep knowledge of how object oriented +systems work. + +**Use a special "facade" type to support syntax like arr.M * arr:** +This is very similar to the previous proposal, in that the ``.M`` +attribute would basically return the same object as ``arr *dot*`` would, +and thus suffers the same objections about 'magicalness'. This +approach also has some non-obvious complexities: for example, while +``arr.M * arr`` must return an array, ``arr.M * arr.M`` and ``arr * +arr.M`` must return facade objects, or else ``arr.M * arr.M * arr`` +and ``arr * arr.M * arr`` will not work. But this means that facade +objects must be able to recognize both other array objects and other +facade objects (which creates additional complexity for writing +interoperating array types from different libraries who must now +recognize both each other's array types and their facade types). It +also creates pitfalls for users who may easily type ``arr * arr.M`` or +``arr.M * arr.M`` and expect to get back an array object; instead, +they will get a mysterious object that throws errors when they attempt +to use it. Basically with this approach users must be careful to +think of ``.M*`` as an indivisible unit that acts as an infix operator +-- and as infix-operator-like token strings go, at least ``*dot*`` +is prettier looking (look at its cute little ears!). + + +Discussions of this PEP +======================= + +Collected here for reference: + +* Github pull request containing much of the original discussion and + drafting: https://github.com/numpy/numpy/pull/4351 + +* sympy mailing list discussions of an early draft: + + * https://groups.google.com/forum/#!topic/sympy/22w9ONLa7qo + * https://groups.google.com/forum/#!topic/sympy/4tGlBGTggZY + +* sage-devel mailing list discussions of an early draft: + https://groups.google.com/forum/#!topic/sage-devel/YxEktGu8DeM + +* 13-Mar-2014 python-ideas thread: + https://mail.python.org/pipermail/python-ideas/2014-March/027053.html + +* numpy-discussion thread on whether to keep ``@@``: + http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069448.html + +* numpy-discussion threads on precedence/associativity of ``@``: + * http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069444.html + * http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069605.html + + +References +========== + +.. [#preprocessor] From a comment by GvR on a G+ post by GvR; the + comment itself does not seem to be directly linkable: https://plus.google.com/115212051037621986145/posts/hZVVtJ9bK3u +.. [#infix-hack] http://code.activestate.com/recipes/384122-infix-operators/ + http://www.sagemath.org/doc/reference/misc/sage/misc/decorators.html#sage.misc.decorators.infix_operator +..
[#scipy-conf] http://conference.scipy.org/past.html +.. [#pydata-conf] http://pydata.org/events/ +.. [#lht] In this formula, :math:`\beta` is a vector or matrix of + regression coefficients, :math:`V` is the estimated + variance/covariance matrix for these coefficients, and we want to + test the null hypothesis that :math:`H\beta = r`; a large :math:`S` + then indicates that this hypothesis is unlikely to be true. For + example, in an analysis of human height, the vector :math:`\beta` + might contain one value which was the average height of the + measured men, and another value which was the average height of the + measured women, and then setting :math:`H = [1, -1], r = 0` would + let us test whether men and women are the same height on + average. Compare to eq. 2.139 in + http://sfb649.wiwi.hu-berlin.de/fedc_homepage/xplore/tutorials/xegbohtmlnode17.html + + Example code is adapted from https://github.com/rerpy/rerpy/blob/0d274f85e14c3b1625acb22aed1efa85d122ecb7/rerpy/incremental_ls.py#L202 + +.. [#pycon-tutorials] Out of the 36 tutorials scheduled for PyCon 2014 + (https://us.pycon.org/2014/schedule/tutorials/), we guess that the + 8 below will almost certainly deal with matrices: + + * Dynamics and control with Python + + * Exploring machine learning with Scikit-learn + + * How to formulate a (science) problem and analyze it using Python + code + + * Diving deeper into Machine Learning with Scikit-learn + + * Data Wrangling for Kaggle Data Science Competitions – An etude + + * Hands-on with Pydata: how to build a minimal recommendation + engine. + + * Python for Social Scientists + + * Bayesian statistics made simple + + In addition, the following tutorials could easily involve matrices: + + * Introduction to game programming + + * mrjob: Snakes on a Hadoop *("We'll introduce some data science + concepts, such as user-user similarity, and show how to calculate + these metrics...")* + + * Mining Social Web APIs with IPython Notebook + + * Beyond Defaults: Creating Polished Visualizations Using Matplotlib + + This gives an estimated range of 8 to 12 / 36 = 22% to 33% of + tutorials dealing with matrices; saying ~20% then gives us some + wiggle room in case our estimates are high. + +.. [#sloc-details] SLOCs were defined as physical lines which contain + at least one token that is not a COMMENT, NEWLINE, ENCODING, + INDENT, or DEDENT. Counts were made by using the ``tokenize`` module + from Python 3.2.3 to examine the tokens in all files ending ``.py`` + underneath some directory. Only tokens which occur at least once + in the source trees are included in the table. The counting script + is available `in the PEP repository + <http://hg.python.org/peps/file/tip/pep-0465/scan-ops.py>`_. + + Matrix multiply counts were estimated by counting how often certain + tokens which are used as matrix multiply function names occurred in + each package. This creates a small number of false positives for + scikit-learn, because we also count instances of the wrappers + around ``dot`` that this package uses, and so there are a few dozen + tokens which actually occur in ``import`` or ``def`` statements. + + All counts were made using the latest development version of each + project as of 21 Feb 2014. + + 'stdlib' is the contents of the Lib/ directory in commit + d6aa3fa646e2 to the cpython hg repository, and treats the following + tokens as indicating matrix multiply: n/a.
+ + 'scikit-learn' is the contents of the sklearn/ directory in commit + 69b71623273ccfc1181ea83d8fb9e05ae96f57c7 to the scikit-learn + repository (https://github.com/scikit-learn/scikit-learn), and + treats the following tokens as indicating matrix multiply: ``dot``, + ``fast_dot``, ``safe_sparse_dot``. + + 'nipy' is the contents of the nipy/ directory in commit + 5419911e99546401b5a13bd8ccc3ad97f0d31037 to the nipy repository + (https://github.com/nipy/nipy/), and treats the following tokens as + indicating matrix multiply: ``dot``. + +.. [#blas-fork] BLAS libraries have a habit of secretly spawning + threads, even when used from single-threaded programs. And threads + play very poorly with ``fork()``; the usual symptom is that + attempting to perform linear algebra in a child process causes an + immediate deadlock. + +.. [#threads-2008] http://fperez.org/py4science/numpy-pep225/numpy-pep225.html + +.. [#broadcasting] http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html + +.. [#matmul-other-langs] http://mail.scipy.org/pipermail/scipy-user/2014-February/035499.html + +.. [#github-details] Counts were produced by manually entering the + string ``"import foo"`` or ``"from foo import"`` (with quotes) into + the Github code search page, e.g.: + https://github.com/search?q=%22import+numpy%22&ref=simplesearch&type=Code + on 2014-04-10 at ~21:00 UTC. The reported values are the numbers + given in the "Languages" box on the lower-left corner, next to + "Python". This also causes some undercounting (e.g., leaving out + Cython code, and possibly one should also count HTML docs and so + forth), but these effects are negligible (e.g., only ~1% of numpy + usage appears to occur in Cython code, and probably even less for + the other modules listed). The use of this box is crucial, + however, because these counts appear to be stable, while the + "overall" counts listed at the top of the page ("We've found ___ + code results") are highly variable even for a single search -- + simply reloading the page can cause this number to vary by a factor + of 2 (!!). (They do seem to settle down if one reloads the page + repeatedly, but nonetheless this is spooky enough that it seemed + better to avoid these numbers.) + + These numbers should of course be taken with multiple grains of + salt; it's not clear how representative Github is of Python code in + general, and limitations of the search tool make it impossible to + get precise counts. AFAIK this is the best data set currently + available, but it'd be nice if it were better. In particular: + + * Lines like ``import sys, os`` will only be counted in the ``sys`` + row. + + * A file containing both ``import X`` and ``from X import`` will be + counted twice + + * Imports of the form ``from X.foo import ...`` are missed. We + could catch these by instead searching for "from X", but this is + a common phrase in English prose, so we'd end up with false + positives from comments, strings, etc. For many of the modules + considered this shouldn't matter too much -- for example, the + stdlib modules have flat namespaces -- but it might especially + lead to undercounting of django, scipy, and twisted. + + Also, it's possible there exist other non-stdlib modules we didn't + think to test that are even more-imported than numpy -- though we + tried quite a few of the obvious suspects. If you find one, let us + know! The modules tested here were chosen based on a combination + of intuition and the top-100 list at pypi-ranking.info. 
+ + Fortunately, it doesn't really matter if it turns out that numpy + is, say, merely the *third* most-imported non-stdlib module, since + the point is just that numeric programming is a common and + mainstream activity. + + Finally, we should point out the obvious: whether a package is + import**ed** is rather different from whether it's import**ant**. + No-one's claiming numpy is "the most important package" or anything + like that. Certainly more packages depend on distutils, e.g., than + depend on numpy -- and far fewer source files import distutils than + import numpy. But this is fine for our present purposes. Most + source files don't import distutils because most source files don't + care how they're distributed, so long as they are; these source + files thus don't care about details of how distutils' API works. + This PEP is in some sense about changing how numpy's and related + packages' APIs work, so the relevant metric is to look at source + files that are choosing to directly interact with that API, which + is sort of like what we get by looking at import statements. + +.. [#hugunin] The first such proposal occurs in Jim Hugunin's very + first email to the matrix SIG in 1995, which lays out the first + draft of what became Numeric. He suggests using ``*`` for + elementwise multiplication, and ``%`` for matrix multiplication: + https://mail.python.org/pipermail/matrix-sig/1995-August/000002.html + +.. [#atat-discussion] http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069502.html + +.. [#associativity-discussions] + http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069444.html + http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069605.html + +.. [#oil-industry-versus-right-associativity] + http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069610.html + +.. [#numpy-associativity-counts] + http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069578.html + +.. [#group-associativity] + http://mail.scipy.org/pipermail/numpy-discussion/2014-March/069530.html + + +Copyright +========= + +This document has been placed in the public domain. diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst new file mode 100644 index 000000000..cfba0a5cd --- /dev/null +++ b/doc/release/1.10.0-notes.rst @@ -0,0 +1,107 @@ +NumPy 1.10.0 Release Notes +************************** + +This release supports Python 2.6 - 2.7 and 3.2 - 3.4. + + +Highlights +========== +* numpy.distutils now supports parallel compilation via the --jobs/-j argument + passed to setup.py build + + +Dropped Support +=============== +* The polytemplate.py file has been removed. +* The _dotblas module is no longer available. +* The testcalcs.py file has been removed. + + +Future Changes +============== +* The SafeEval class will be removed. +* The alterdot and restoredot functions will be removed. + + +Compatibility notes +=================== +NPY_RELAXED_STRIDE_CHECKING is now true by default. + + +New Features +============ + +`np.cbrt` to compute cube root for real floats +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`np.cbrt` wraps the C99 cube root function `cbrt`. +Compared to `np.power(x, 1./3.)` it is well defined for negative real floats +and a bit faster. + +numpy.distutils now allows parallel compilation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +By passing `--jobs=n` or `-j n` to `setup.py build` the compilation of +extensions is now performed in `n` parallel processes.
+The parallelization is limited to files within one extension, so projects using +Cython will not benefit, because it builds extensions from single files. + + +Improvements +============ + +`np.digitize` using binary search +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`np.digitize` is now implemented in terms of `np.searchsorted`. This means +that a binary search is used to bin the values, which scales much better +for larger numbers of bins than the previous linear search. It also removes +the requirement for the input array to be 1-dimensional. + +`np.poly` now casts integer inputs to float +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`np.poly` will now cast 1-dimensional input arrays of integer type to double +precision floating point, to prevent integer overflow when computing the monic +polynomial. It is still possible to obtain higher precision results by +passing in an array of object type, filled e.g. with Python ints. + +`np.interp` can now be used with periodic functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`np.interp` now has a new parameter `period` that supplies the period of the +input data `xp`. In that case, the input data is properly normalized to the +given period and one end point is added to each extremity of `xp` in order to +close the previous and the next period cycles, resulting in the correct +interpolation behavior. + + +Changes +======= + +dotblas functionality moved to multiarray +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The cblas versions of dot, inner, and vdot have been integrated into +the multiarray module. In particular, vdot is now a multiarray function, +which it was not before. + +stricter check of gufunc signature compliance +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Inputs to generalized universal functions are now more strictly checked +against the function's signature: all core dimensions are now required to +be present in input arrays; core dimensions with the same label must have +the exact same size; and output core dimensions must be specified, either +by an input core dimension with the same label or by a passed-in output array. + + +Deprecations +============ + +SafeEval +~~~~~~~~ +The SafeEval class in numpy/lib/utils.py is deprecated and will be removed +in the next release. + +alterdot, restoredot +~~~~~~~~~~~~~~~~~~~~ +The alterdot and restoredot functions no longer do anything, and are +deprecated. + +pkgload, PackageLoader +~~~~~~~~~~~~~~~~~~~~~~ +These ways of loading packages are now deprecated. diff --git a/doc/release/1.8.2-notes.rst b/doc/release/1.8.2-notes.rst new file mode 100644 index 000000000..c21f81a27 --- /dev/null +++ b/doc/release/1.8.2-notes.rst @@ -0,0 +1,19 @@ +NumPy 1.8.2 Release Notes +************************* + +This is a bugfix only release in the 1.8.x series.
+ +Issues fixed +============ + +* gh-4836: partition produces wrong results for multiple selections in equal ranges +* gh-4656: Make fftpack._raw_fft threadsafe +* gh-4628: incorrect argument order to _copyto in np.nanmax, np.nanmin +* gh-4642: Hold GIL for converting dtypes types with fields +* gh-4733: fix np.linalg.svd(b, compute_uv=False) +* gh-4853: avoid unaligned simd load on reductions on i386 +* gh-4722: Fix seg fault converting empty string to object +* gh-4613: Fix lack of NULL check in array_richcompare +* gh-4774: avoid unaligned access for strided byteswap +* gh-650: Prevent division by zero when creating arrays from some buffers +* gh-4602: ifort has issues with optimization flag O2, use O1 diff --git a/doc/release/1.9.0-notes.rst b/doc/release/1.9.0-notes.rst index c00f7f9d6..37343ec6d 100644 --- a/doc/release/1.9.0-notes.rst +++ b/doc/release/1.9.0-notes.rst @@ -6,8 +6,6 @@ This release supports Python 2.6 - 2.7 and 3.2 - 3.4. Highlights ========== -* Addition of `__numpy_ufunc__` to allow overriding ufuncs in ndarray - subclasses. * Numerous performance improvements in various areas, most notably indexing and operations on small arrays are significantly faster. Indexing operations now also release the GIL. @@ -35,6 +33,8 @@ Future Changes * String version checks will break because, e.g., '1.9' > '1.10' is True. A NumpyVersion class has been added that can be used for such comparisons. * The diagonal and diag functions will return writeable views in 1.10.0 +* The `S` and/or `a` dtypes may be changed to represent Python strings + instead of bytes; in Python 3 these two types are very different. Compatibility notes @@ -176,6 +176,11 @@ introduced in advanced indexing operations: * Indexing with more than one ellipsis (``...``) is deprecated. +Non-integer reduction axis indexes are deprecated +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Non-integer axis indexes to reduction ufuncs like `add.reduce` or `sum` are +deprecated. + ``promote_types`` and string dtype ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``promote_types`` function now returns a valid string length when given an @@ -262,13 +267,6 @@ ufunc reductions do since 1.7. One can now say axis=(index, index) to pick a list of axes for the reduction. The ``keepdims`` keyword argument was also added to allow convenient broadcasting to arrays of the original shape. -Ufunc and Dot Overrides -~~~~~~~~~~~~~~~~~~~~~~~ -For better compatibility with external objects you can now override -universal functions (ufuncs), ``numpy.core._dotblas.dot``, and -``numpy.core.multiarray.dot`` (the numpy.dot functions). By defining a -``__numpy_ufunc__`` method. - Dtype parameter added to ``np.linspace`` and ``np.logspace`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The returned data type from the ``linspace`` and ``logspace`` functions can @@ -336,6 +334,12 @@ in either an error being raised, or wrong results computed. Improvements ============ +Better numerical stability for sum in some cases +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Pairwise summation is now used in the sum method, but only along the fast +axis and for groups of values <= 8192 in length. This should also +improve the accuracy of var and std in some common cases.
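+ +A small illustrative sketch of the effect (assuming a contiguous +array, so the sum runs along the fast axis):: + +    >>> x = np.ones(2**25, dtype=np.float32) +    >>> x.sum()   # pairwise partial sums stay exactly representable +    33554432.0 +    >>> # A naive running float32 sum would stall at 2**24 = 16777216, +    >>> # because 16777216 + 1 rounds back to 16777216 in float32.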
+ Percentile implemented in terms of ``np.partition`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``np.percentile`` has been implemented in terms of ``np.partition`` which diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 036185782..e77dfc31e 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -41,7 +41,7 @@ Numpy provides several hooks that classes can customize: .. function:: class.__numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs) - .. versionadded:: 1.9 + .. versionadded:: 1.10 Any class (ndarray subclass or not) can define this method to override behavior of Numpy's ufuncs. This works quite similarly to @@ -267,13 +267,6 @@ they inherit from the ndarray): :meth:`.flush() <memmap.flush>` which must be called manually by the user to ensure that any changes to the array actually get written to disk. -.. note:: - - Memory-mapped arrays use the the Python memory-map object which - (prior to Python 2.5) does not allow files to be larger than a - certain size depending on the platform. This size is always - < 2GB even on 64-bit systems. - .. autosummary:: :toctree: generated/ diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst index d04f89897..ef0180e0f 100644 --- a/doc/source/reference/arrays.indexing.rst +++ b/doc/source/reference/arrays.indexing.rst @@ -31,9 +31,9 @@ integer, or a tuple of slice objects and integers. :const:`Ellipsis` and :const:`newaxis` objects can be interspersed with these as well. In order to remain backward compatible with a common usage in Numeric, basic slicing is also initiated if the selection object is -any sequence (such as a :class:`list`) containing :class:`slice` +any non-ndarray sequence (such as a :class:`list`) containing :class:`slice` objects, the :const:`Ellipsis` object, or the :const:`newaxis` object, -but no integer arrays or other embedded sequences. +but not for integer arrays or other embedded sequences. .. index:: triple: ndarray; special methods; getslice @@ -46,8 +46,8 @@ scalar <arrays.scalars>` representing the corresponding item. As in Python, all indices are zero-based: for the *i*-th index :math:`n_i`, the valid range is :math:`0 \le n_i < d_i` where :math:`d_i` is the *i*-th element of the shape of the array. Negative indices are -interpreted as counting from the end of the array (*i.e.*, if *i < 0*, -it means :math:`n_i + i`). +interpreted as counting from the end of the array (*i.e.*, if +:math:`n_i < 0`, it means :math:`n_i + d_i`). All arrays generated by basic slicing are always :term:`views <view>` @@ -84,7 +84,7 @@ concepts to remember include: - Assume *n* is the number of elements in the dimension being sliced. Then, if *i* is not given it defaults to 0 for *k > 0* and - *n* for *k < 0* . If *j* is not given it defaults to *n* for *k > 0* + *n - 1* for *k < 0* . If *j* is not given it defaults to *n* for *k > 0* and -1 for *k < 0* . If *k* is not given it defaults to 1. Note that ``::`` is the same as ``:`` and means select all indices along this axis. diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst index 23355bc91..e3723c46f 100644 --- a/doc/source/reference/c-api.array.rst +++ b/doc/source/reference/c-api.array.rst @@ -1632,11 +1632,11 @@ Conversion Shape Manipulation ^^^^^^^^^^^^^^^^^^ -.. cfunction:: PyObject* PyArray_Newshape(PyArrayObject* self, PyArray_Dims* newshape) +.. 
cfunction:: PyObject* PyArray_Newshape(PyArrayObject* self, PyArray_Dims* newshape, NPY_ORDER order) Result will be a new array (pointing to the same memory location - as *self* if possible), but having a shape given by *newshape* - . If the new shape is not compatible with the strides of *self*, + as *self* if possible), but having a shape given by *newshape*. + If the new shape is not compatible with the strides of *self*, then a copy of the array with the new specified shape will be returned. @@ -1645,6 +1645,7 @@ Shape Manipulation Equivalent to :meth:`ndarray.reshape` (*self*, *shape*) where *shape* is a sequence. Converts *shape* to a :ctype:`PyArray_Dims` structure and calls :cfunc:`PyArray_Newshape` internally. + Kept for backward compatibility -- not recommended. .. cfunction:: PyObject* PyArray_Squeeze(PyArrayObject* self) @@ -1805,14 +1806,23 @@ Item selection and manipulation :cfunc:`PyArray_Sort` (...) can also be used to sort the array directly. -.. cfunction:: PyObject* PyArray_SearchSorted(PyArrayObject* self, PyObject* values) +.. cfunction:: PyObject* PyArray_SearchSorted(PyArrayObject* self, PyObject* values, NPY_SEARCHSIDE side, PyObject* perm) - Equivalent to :meth:`ndarray.searchsorted` (*self*, *values*). Assuming - *self* is a 1-d array in ascending order representing bin - boundaries then the output is an array the same shape as *values* - of bin numbers, giving the bin into which each item in *values* - would be placed. No checking is done on whether or not self is in - ascending order. + Equivalent to :meth:`ndarray.searchsorted` (*self*, *values*, *side*, + *perm*). Assuming *self* is a 1-d array in ascending order, then the + output is an array of indices the same shape as *values* such that, if + the elements in *values* were inserted before the indices, the order of + *self* would be preserved. No checking is done on whether or not self is + in ascending order. + + The *side* argument indicates whether the index returned should be that of + the first suitable location (if :cdata:`NPY_SEARCHLEFT`) or of the last + (if :cdata:`NPY_SEARCHRIGHT`). + + The *perm* argument, if not ``NULL``, must be a 1D array of integer + indices the same length as *self*, that sorts it into ascending order. + This is typically the result of a call to :cfunc:`PyArray_ArgSort` (...). + Binary search is used to find the required insertion points. .. cfunction:: int PyArray_Partition(PyArrayObject *self, PyArrayObject * ktharray, int axis, NPY_SELECTKIND which) @@ -1886,10 +1896,10 @@ Calculation .. note:: - The out argument specifies where to place the result. If out is - NULL, then the output array is created, otherwise the output is - placed in out which must be the correct size and type. A new - reference to the ouput array is always returned even when out + The out argument specifies where to place the result. If out is + NULL, then the output array is created, otherwise the output is + placed in out which must be the correct size and type. A new + reference to the output array is always returned even when out is not NULL. The caller of the routine has the responsibility to ``DECREF`` out if not NULL or a memory-leak will occur. @@ -3103,6 +3113,12 @@ Group 1 Useful to regain the GIL in situations where it was released using the BEGIN form of this macro. + .. cfunction:: NPY_BEGIN_THREADS_THRESHOLDED(int loop_size) + + Useful to release the GIL only if *loop_size* exceeds a + minimum threshold, currently set to 500. Should be matched + with a ..
cmacro::`NPY_END_THREADS` to regain the GIL. + Group 2 """"""" diff --git a/doc/source/reference/c-api.generalized-ufuncs.rst b/doc/source/reference/c-api.generalized-ufuncs.rst index 14f33efcb..92dc8aec0 100644 --- a/doc/source/reference/c-api.generalized-ufuncs.rst +++ b/doc/source/reference/c-api.generalized-ufuncs.rst @@ -18,30 +18,52 @@ arguments is called the "signature" of a ufunc. For example, the ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs and one scalar output. -Another example is the function ``inner1d(a,b)`` with a signature of -``(i),(i)->()``. This applies the inner product along the last axis of +Another example is the function ``inner1d(a, b)`` with a signature of +``(i),(i)->()``. This applies the inner product along the last axis of each input, but keeps the remaining indices intact. -For example, where ``a`` is of shape ``(3,5,N)`` -and ``b`` is of shape ``(5,N)``, this will return an output of shape ``(3,5)``. +For example, where ``a`` is of shape ``(3, 5, N)`` and ``b`` is of shape +``(5, N)``, this will return an output of shape ``(3,5)``. The underlying elementary function is called ``3 * 5`` times. In the signature, we specify one core dimension ``(i)`` for each input and zero core dimensions ``()`` for the output, since it takes two 1-d arrays and returns a scalar. By using the same name ``i``, we specify that the two -corresponding dimensions should be of the same size (or one of them is -of size 1 and will be broadcasted). +corresponding dimensions should be of the same size. The dimensions beyond the core dimensions are called "loop" dimensions. In -the above example, this corresponds to ``(3,5)``. - -The usual numpy "broadcasting" rules apply, where the signature -determines how the dimensions of each input/output object are split -into core and loop dimensions: - -#. While an input array has a smaller dimensionality than the corresponding - number of core dimensions, 1's are pre-pended to its shape. +the above example, this corresponds to ``(3, 5)``. + +The signature determines how the dimensions of each input/output array are +split into core and loop dimensions: + +#. Each dimension in the signature is matched to a dimension of the + corresponding passed-in array, starting from the end of the shape tuple. + These are the core dimensions, and they must be present in the arrays, or + an error will be raised. +#. Core dimensions assigned to the same label in the signature (e.g. the + ``i`` in ``inner1d``'s ``(i),(i)->()``) must have exactly matching sizes, + no broadcasting is performed. #. The core dimensions are removed from all inputs and the remaining - dimensions are broadcasted; defining the loop dimensions. -#. The output is given by the loop dimensions plus the output core dimensions. + dimensions are broadcast together, defining the loop dimensions. +#. The shape of each output is determined from the loop dimensions plus the + output's core dimensions + +Typically, the size of all core dimensions in an output will be determined by +the size of a core dimension with the same label in an input array. This is +not a requirement, and it is possible to define a signature where a label +comes up for the first time in an output, although some precautions must be +taken when calling such a function. An example would be the function +``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of +``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean +distances among them. 
The output dimension ``p`` must therefore be equal to +``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an +output array of the right size. If the size of a core dimension of an output +cannot be determined from a passed in input or output array, an error will be +raised. + +Note: Prior to Numpy 1.10.0, less strict checks were in place: missing core +dimensions were created by prepending 1's to the shape as necessary, core +dimensions with the same label were broadcast together, and undetermined +dimensions were created with size 1. Definitions @@ -70,7 +92,7 @@ Core Dimension Dimension Name A dimension name represents a core dimension in the signature. Different dimensions may share a name, indicating that they are of - the same size (or are broadcastable). + the same size. Dimension Index A dimension index is an integer representing a dimension name. It @@ -93,8 +115,7 @@ following format: * Dimension lists for different arguments are separated by ``","``. Input/output arguments are separated by ``"->"``. * If one uses the same dimension name in multiple locations, this - enforces the same size (or broadcastable size) of the corresponding - dimensions. + enforces the same size of the corresponding dimensions. The formal syntax of signatures is as follows:: @@ -111,10 +132,9 @@ The formal syntax of signatures is as follows:: Notes: #. All quotes are for clarity. -#. Core dimensions that share the same name must be broadcastable, as - the two ``i`` in our example above. Each dimension name typically - corresponding to one level of looping in the elementary function's - implementation. +#. Core dimensions that share the same name must have the exact same size. + Each dimension name typically corresponds to one level of looping in the + elementary function's implementation. #. White spaces are ignored. Here are some examples of signatures: diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst index f1e216a5c..473e25010 100644 --- a/doc/source/reference/c-api.types-and-structures.rst +++ b/doc/source/reference/c-api.types-and-structures.rst @@ -244,7 +244,7 @@ PyArrayDescr_Type Indicates that items of this data-type must be reference counted (using :cfunc:`Py_INCREF` and :cfunc:`Py_DECREF` ). - .. cvar:: NPY_ITEM_LISTPICKLE + .. cvar:: NPY_LIST_PICKLE Indicates arrays of this data-type must be converted to a list before pickling. @@ -646,9 +646,9 @@ PyUFunc_Type void **data; int ntypes; int check_return; - char *name; + const char *name; char *types; - char *doc; + const char *doc; void *ptr; PyObject *obj; PyObject *userloops; @@ -1031,9 +1031,9 @@ PyArray_Chunk This is equivalent to the buffer object structure in Python up to the ptr member. On 32-bit platforms (*i.e.* if :cdata:`NPY_SIZEOF_INT` - == :cdata:`NPY_SIZEOF_INTP` ) or in Python 2.5, the len member also - matches an equivalent member of the buffer object. It is useful to - represent a generic single- segment chunk of memory. + == :cdata:`NPY_SIZEOF_INTP`), the len member also matches an equivalent + member of the buffer object. It is useful to represent a generic + single-segment chunk of memory. .. code-block:: c diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst index 71abffd04..3673958d9 100644 --- a/doc/source/reference/c-api.ufunc.rst +++ b/doc/source/reference/c-api.ufunc.rst @@ -114,7 +114,6 @@ Functions data type, it will be internally upcast to the int_ (or uint) data type. 
- :param doc: Allows passing in a documentation string to be stored with the ufunc. The documentation string should not contain the name @@ -128,6 +127,21 @@ Functions structure and it does get set with this value when the ufunc object is created. +.. cfunction:: PyObject* PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction* func, + void** data, char* types, int ntypes, int nin, int nout, int identity, + char* name, char* doc, int check_return, char *signature) + + This function is very similar to PyUFunc_FromFuncAndData above, but has + an extra *signature* argument, to define generalized universal functions. + Similarly to how ufuncs are built around an element-by-element operation, + gufuncs are built around subarray-by-subarray operations, with the + signature defining the subarrays to operate on. + + :param signature: + The signature for the new gufunc. Setting it to NULL is equivalent + to calling PyUFunc_FromFuncAndData. A copy of the string is made, + so the passed in buffer can be freed. + .. cfunction:: int PyUFunc_RegisterLoopForType(PyUFuncObject* ufunc, int usertype, PyUFuncGenericFunction function, int* arg_types, void* data) diff --git a/doc/source/reference/routines.array-creation.rst b/doc/source/reference/routines.array-creation.rst index 23b35243b..c7c6ab815 100644 --- a/doc/source/reference/routines.array-creation.rst +++ b/doc/source/reference/routines.array-creation.rst @@ -20,6 +20,8 @@ Ones and zeros ones_like zeros zeros_like + full + full_like From existing data ------------------ diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index ca97bb479..81af0a315 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -54,6 +54,8 @@ Changing kind of array asmatrix asfarray asfortranarray + ascontiguousarray + asarray_chkfinite asscalar require diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 5cb38e83f..66bcb1f1c 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -65,6 +65,8 @@ Inspecting the array ma.nonzero ma.shape ma.size + ma.is_masked + ma.is_mask ma.MaskedArray.data ma.MaskedArray.mask @@ -141,6 +143,7 @@ Joining arrays ma.column_stack ma.concatenate + ma.append ma.dstack ma.hstack ma.vstack diff --git a/doc/source/reference/routines.maskna.rst b/doc/source/reference/routines.maskna.rst deleted file mode 100644 index 2910acbac..000000000 --- a/doc/source/reference/routines.maskna.rst +++ /dev/null @@ -1,11 +0,0 @@ -NA-Masked Array Routines -======================== - -.. currentmodule:: numpy - -NA Values ---------- -.. autosummary:: - :toctree: generated/ - - isna diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst index 14729f08b..c40795434 100644 --- a/doc/source/reference/routines.polynomials.classes.rst +++ b/doc/source/reference/routines.polynomials.classes.rst @@ -211,7 +211,7 @@ constant are 0, but both can be specified.:: In the first case the lower bound of the integration is set to -1 and the integration constant is 0. In the second the constant of integration is set to 1 as well. Differentiation is simpler since the only option is the -number times the polynomial is differentiated:: +number of times the polynomial is differentiated:: >>> p = P([1, 2, 3]) >>> p.deriv(1) @@ -270,7 +270,7 @@ polynomials up to degree 5 are plotted below.
>>> import matplotlib.pyplot as plt >>> from numpy.polynomial import Chebyshev as T >>> x = np.linspace(-1, 1, 100) - >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="T_%d"%i) + >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="$T_%d$"%i) ... >>> plt.legend(loc="upper left") <matplotlib.legend.Legend object at 0x3b3ee10> @@ -284,7 +284,7 @@ The same plots over the range -2 <= `x` <= 2 look very different: >>> import matplotlib.pyplot as plt >>> from numpy.polynomial import Chebyshev as T >>> x = np.linspace(-2, 2, 100) - >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="T_%d"%i) + >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="$T_%d$"%i) ... >>> plt.legend(loc="lower right") <matplotlib.legend.Legend object at 0x3b3ee10> diff --git a/doc/source/reference/routines.sort.rst b/doc/source/reference/routines.sort.rst index 2b36aec75..c22fa0cd6 100644 --- a/doc/source/reference/routines.sort.rst +++ b/doc/source/reference/routines.sort.rst @@ -39,4 +39,3 @@ Counting :toctree: generated/ count_nonzero - count_reduce_items diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 2ae794f59..3d6112058 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -313,16 +313,15 @@ advanced usage and will not typically be used. .. versionadded:: 1.6 + May be 'no', 'equiv', 'safe', 'same_kind', or 'unsafe'. + See :func:`can_cast` for explanations of the parameter values. + Provides a policy for what kind of casting is permitted. For compatibility - with previous versions of NumPy, this defaults to 'unsafe'. May be 'no', - 'equiv', 'safe', 'same_kind', or 'unsafe'. See :func:`can_cast` for - explanations of the parameter values. - - In a future version of numpy, this argument will default to - 'same_kind'. As part of this transition, starting in version 1.7, - ufuncs will produce a DeprecationWarning for calls which are - allowed under the 'unsafe' rules, but not under the 'same_kind' - rules. + with previous versions of NumPy, this defaults to 'unsafe' for numpy < 1.7. + In numpy 1.7 a transition to 'same_kind' was begun where ufuncs produce a + DeprecationWarning for calls which are allowed under the 'unsafe' + rules, but not under the 'same_kind' rules. In numpy 1.10 the default + will be 'same_kind'. *order* diff --git a/doc/source/release.rst b/doc/source/release.rst index eb366661f..657eb9a5d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2,7 +2,10 @@ Release Notes ************* +.. include:: ../release/1.10.0-notes.rst .. include:: ../release/1.9.0-notes.rst +.. include:: ../release/1.8.2-notes.rst +.. include:: ../release/1.8.1-notes.rst .. include:: ../release/1.8.0-notes.rst .. include:: ../release/1.7.2-notes.rst .. include:: ../release/1.7.1-notes.rst diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst index db6c8e118..4d54c0eef 100644 --- a/doc/source/user/c-info.how-to-extend.rst +++ b/doc/source/user/c-info.how-to-extend.rst @@ -82,7 +82,7 @@ a variable indicating whether the method uses keyword arguments or not, and docstrings. These are explained in the next section. If you want to add constants to the module, then you store the returned value from Py_InitModule which is a module object. The most general way to -add itmes to the module is to get the module dictionary using +add items to the module is to get the module dictionary using PyModule_GetDict(module). 
With the module dictionary, you can add whatever you like to the module manually. An easier way to add objects to the module is to use one of three additional Python C-API calls @@ -212,7 +212,7 @@ special attention to the difference between 'N' and 'O' in the format string or you can easily create memory leaks. The 'O' format string increments the reference count of the :ctype:`PyObject *` C-variable it corresponds to, while the 'N' format string steals a reference to the -corresponding :ctype:`PyObject *` C-variable. You should use 'N' if you ave +corresponding :ctype:`PyObject *` C-variable. You should use 'N' if you have already created a reference for the object and just want to give that reference to the tuple. You should use 'O' if you only have a borrowed reference to an object and need to create one to provide for the diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index 985d478e0..8dfd39beb 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -65,7 +65,7 @@ high-level language for scientific and engineering programming. There are two basic approaches to calling compiled code: writing an extension module that is then imported to Python using the import command, or calling a shared-library subroutine directly from Python -using the ctypes module (included in the standard distribution with +using the ctypes module (included in the standard distribution since Python 2.5). The first method is the most common (but with the inclusion of ctypes into Python 2.5 this status may change). @@ -249,8 +249,14 @@ necessary to tell f2py that the value of n depends on the input a (so that it won't try to create the variable n until the variable a is created). +After modifying ``add.pyf``, the new python module file can be generated +by compiling both ``add.f95`` and ``add.pyf``:: + + f2py -c add.pyf add.f95 + The new interface has the docstring: + >>> import add >>> print add.zadd.__doc__ zadd - Function signature: c = zadd(a,b) @@ -896,14 +902,13 @@ libraries), or weave (for inline C-code). ctypes ====== -Ctypes is a python extension module (downloaded separately for Python -<2.5 and included with Python 2.5) that allows you to call an -arbitrary function in a shared library directly from Python. This -approach allows you to interface with C-code directly from Python. -This opens up an enormous number of libraries for use from Python. The -drawback, however, is that coding mistakes can lead to ugly program -crashes very easily (just as can happen in C) because there is little -type or bounds checking done on the parameters. This is especially +Ctypes is a Python extension module, included in the stdlib, that +allows you to call an arbitrary function in a shared library directly +from Python. This approach allows you to interface with C-code directly +from Python. This opens up an enormous number of libraries for use from +Python. The drawback, however, is that coding mistakes can lead to ugly +program crashes very easily (just as can happen in C) because there is +little type or bounds checking done on the parameters. This is especially true when array data is passed in as a pointer to a raw memory location. It is then your responsibility to ensure that the subroutine does not access memory outside the actual array area.
But, if you don't diff --git a/doc/source/user/install.rst b/doc/source/user/install.rst index 9d6f61e65..29aeff6a3 100644 --- a/doc/source/user/install.rst +++ b/doc/source/user/install.rst @@ -37,15 +37,16 @@ Most of the major distributions provide packages for NumPy, but these can lag behind the most recent NumPy release. Pre-built binary packages for Ubuntu are available on the `scipy ppa <https://edge.launchpad.net/~scipy/+archive/ppa>`_. Redhat binaries are -available in the `EPD <http://www.enthought.com/products/epd.php>`_. +available in the `Enthought Canopy +<https://www.enthought.com/products/canopy/>`_. Mac OS X -------- A universal binary installer for NumPy is available from the `download site <http://sourceforge.net/project/showfiles.php?group_id=1369& -package_id=175103>`_. The `EPD <http://www.enthought.com/products/epd.php>`_ -provides NumPy binaries. +package_id=175103>`_. The `Enthought Canopy +<https://www.enthought.com/products/canopy/>`_ provides NumPy binaries. Building from source ==================== @@ -58,18 +59,19 @@ Prerequisites Building NumPy requires the following software installed: -1) Python 2.4.x, 2.5.x or 2.6.x +1) Python 2.6.x, 2.7.x, 3.2.x or newer - On Debian and derivative (Ubuntu): python, python-dev + On Debian and derivatives (Ubuntu): python, python-dev (or python3-dev) On Windows: the official python installer at `www.python.org <http://www.python.org>`_ is enough Make sure that the Python package distutils is installed before - continuing. For example, in Debian GNU/Linux, distutils is included - in the python-dev package. + continuing. For example, in Debian GNU/Linux, installing python-dev + also installs distutils. - Python must also be compiled with the zlib module enabled. + Python must also be compiled with the zlib module enabled. This is + practically always the case with pre-packaged Pythons. 2) Compilers diff --git a/doc/sphinxext b/doc/sphinxext -Subproject 447dd0b59c2fe91ca9643701036d3d04919ddc7 +Subproject 84cc897d266e0afc28fc5296edf01afb0800547 diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py index 526217359..9b1942e8d 100644 --- a/numpy/_import_tools.py +++ b/numpy/_import_tools.py @@ -2,6 +2,7 @@ from __future__ import division, absolute_import, print_function import os import sys +import warnings __all__ = ['PackageLoader'] @@ -162,7 +163,10 @@ class PackageLoader(object): postpone= : bool when True, don't load packages [default: False] - """ + """ + warnings.warn('pkgload and PackageLoader are obsolete ' + 'and will be removed in a future version of numpy', + DeprecationWarning) frame = self.parent_frame self.info_modules = {} if options.get('force', False): diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index 86ea4b8b6..a88a782b4 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -2174,43 +2174,6 @@ add_newdoc('numpy.core', 'einsum', """) -add_newdoc('numpy.core', 'alterdot', - """ - Change `dot`, `vdot`, and `inner` to use accelerated BLAS functions. - - Typically, as a user of Numpy, you do not explicitly call this function. If - Numpy is built with an accelerated BLAS, this function is automatically - called when Numpy is imported. - - When Numpy is built with an accelerated BLAS like ATLAS, these functions - are replaced to make use of the faster implementations. The faster - implementations only affect float32, float64, complex64, and complex128 - arrays. Furthermore, the BLAS API only includes matrix-matrix, - matrix-vector, and vector-vector products. 
Products of arrays with larger - dimensionalities use the built in functions and are not accelerated. - - See Also - -------- - restoredot : `restoredot` undoes the effects of `alterdot`. - - """) - -add_newdoc('numpy.core', 'restoredot', - """ - Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS - implementations. - - Typically, the user will only need to call this when troubleshooting and - installation problem, reproducing the conditions of a build without an - accelerated BLAS, or when being very careful about benchmarking linear - algebra operations. - - See Also - -------- - alterdot : `restoredot` undoes the effects of `alterdot`. - - """) - add_newdoc('numpy.core', 'vdot', """ vdot(a, b) @@ -3834,7 +3797,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('put', add_newdoc('numpy.core.multiarray', 'copyto', """ - copyto(dst, src, casting='same_kind', where=None, preservena=False) + copyto(dst, src, casting='same_kind', where=None) Copies values from one array to another, broadcasting as necessary. @@ -3862,9 +3825,6 @@ add_newdoc('numpy.core.multiarray', 'copyto', A boolean array which is broadcasted to match the dimensions of `dst`, and selects elements to copy from `src` to `dst` wherever it contains the value True. - preservena : bool, optional - If set to True, leaves any NA values in `dst` untouched. This - is similar to the "hard mask" feature in numpy.ma. """) @@ -3879,11 +3839,6 @@ add_newdoc('numpy.core.multiarray', 'putmask', If `values` is not the same size as `a` and `mask` then it will repeat. This gives behavior different from ``a[mask] = values``. - .. note:: The `putmask` functionality is also provided by `copyto`, which - can be significantly faster and in addition is NA-aware - (`preservena` keyword). Replacing `putmask` with - ``np.copyto(a, values, where=mask)`` is recommended. - Parameters ---------- a : array_like @@ -4459,12 +4414,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', tobytesdoc = """ - a.tostring(order='C') + a.{name}(order='C') - Construct a Python string containing the raw data bytes in the array. + Construct Python bytes containing the raw data bytes in the array. - Constructs a Python string showing a copy of the raw contents of - data memory. The string can be produced in either 'C' or 'Fortran', + Constructs Python bytes showing a copy of the raw contents of + data memory. The bytes object can be produced in either 'C' or 'Fortran', or 'Any' order (the default is 'C'-order). 'Any' order means C-order unless the F_CONTIGUOUS flag in the array is set, in which case it means 'Fortran' order. @@ -4479,29 +4434,31 @@ tobytesdoc = """ Returns ------- - s : str - A Python string exhibiting a copy of `a`'s raw data. + s : bytes + Python bytes exhibiting a copy of `a`'s raw data. Examples -------- >>> x = np.array([[0, 1], [2, 3]]) >>> x.tobytes() - '\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' >>> x.tobytes('C') == x.tobytes() True >>> x.tobytes('F') - '\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' """ add_newdoc('numpy.core.multiarray', 'ndarray', - ('tostring', tobytesdoc.format(deprecated= + ('tostring', tobytesdoc.format(name='tostring', + deprecated= 'This function is a compatibility ' 'alias for tobytes. 
Despite its ' 'name it returns bytes not ' 'strings.'))) add_newdoc('numpy.core.multiarray', 'ndarray', - ('tobytes', tobytesdoc.format(deprecated='.. versionadded:: 1.9.0'))) + ('tobytes', tobytesdoc.format(name='tobytes', + deprecated='.. versionadded:: 1.9.0'))) add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', """ @@ -4880,14 +4837,15 @@ add_newdoc('numpy.lib._compiled_base', 'digitize', Parameters ---------- x : array_like - Input array to be binned. It has to be 1-dimensional. + Input array to be binned. Prior to Numpy 1.10.0, this array had to + be 1-dimensional, but can now have any shape. bins : array_like Array of bins. It has to be 1-dimensional and monotonic. right : bool, optional Indicating whether the intervals include the right or the left bin edge. Default behavior is (right==False) indicating that the interval - does not include the right edge. The left bin and is open in this - case. Ie., bins[i-1] <= x < bins[i] is the default behavior for + does not include the right edge. The left bin end is open in this + case, i.e., bins[i-1] <= x < bins[i] is the default behavior for monotonically increasing bins. Returns @@ -4898,7 +4856,7 @@ add_newdoc('numpy.lib._compiled_base', 'digitize', Raises ------ ValueError - If the input is not 1-dimensional, or if `bins` is not monotonic. + If `bins` is not monotonic. TypeError If the type of the input is complex. @@ -4912,6 +4870,13 @@ add_newdoc('numpy.lib._compiled_base', 'digitize', attempting to index `bins` with the indices that `digitize` returns will result in an IndexError. + .. versionadded:: 1.10.0 + + `np.digitize` is implemented in terms of `np.searchsorted`. This means + that a binary search is used to bin the values, which scales much better + for larger numbers of bins than the previous linear search. It also removes + the requirement for the input array to be 1-dimensional. + Examples -------- >>> x = np.array([0.2, 6.4, 3.0, 1.6]) @@ -4928,7 +4893,7 @@ add_newdoc('numpy.lib._compiled_base', 'digitize', 1.0 <= 1.6 < 2.5 >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) - >>> bins = np.array([0,5,10,15,20]) + >>> bins = np.array([0, 5, 10, 15, 20]) >>> np.digitize(x,bins,right=True) array([1, 2, 3, 4, 4]) >>> np.digitize(x,bins,right=False) @@ -5519,6 +5484,8 @@ add_newdoc('numpy.core', 'ufunc', ('reduce', in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + ..
versionadded:: 1.7.0 + Returns ------- r : ndarray diff --git a/numpy/compat/_inspect.py b/numpy/compat/_inspect.py index 6a499e727..c1aa22ec4 100644 --- a/numpy/compat/_inspect.py +++ b/numpy/compat/_inspect.py @@ -20,7 +20,9 @@ def ismethod(object): __name__ name with which this method was defined im_class class object in which this method belongs im_func function object containing implementation of method - im_self instance to which this method is bound, or None""" + im_self instance to which this method is bound, or None + + """ return isinstance(object, types.MethodType) def isfunction(object): @@ -33,7 +35,9 @@ def isfunction(object): func_defaults tuple of any default values for arguments func_doc (same as __doc__) func_globals global namespace in which this function was defined - func_name (same as __name__)""" + func_name (same as __name__) + + """ return isinstance(object, types.FunctionType) def iscode(object): @@ -51,7 +55,9 @@ def iscode(object): co_names tuple of names of local variables co_nlocals number of local variables co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables""" + co_varnames tuple of names of arguments and local variables + + """ return isinstance(object, types.CodeType) # ------------------------------------------------ argument list extraction @@ -63,51 +69,23 @@ def getargs(co): Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and - 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + + """ if not iscode(co): raise TypeError('arg is not a code object') - code = co.co_code nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) - step = 0 # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. for i in range(nargs): if args[i][:1] in ['', '.']: - stack, remain, count = [], [], [] - while step < len(code): - op = ord(code[step]) - step = step + 1 - if op >= dis.HAVE_ARGUMENT: - opname = dis.opname[op] - value = ord(code[step]) + ord(code[step+1])*256 - step = step + 2 - if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']: - remain.append(value) - count.append(value) - elif opname == 'STORE_FAST': - stack.append(names[value]) - - # Special case for sublists of length 1: def foo((bar)) - # doesn't generate the UNPACK_TUPLE bytecode, so if - # `remain` is empty here, we have such a sublist. - if not remain: - stack[0] = [stack[0]] - break - else: - remain[-1] = remain[-1] - 1 - while remain[-1] == 0: - remain.pop() - size = count.pop() - stack[-size:] = [stack[-size:]] - if not remain: break - remain[-1] = remain[-1] - 1 - if not remain: break - args[i] = stack[0] - + raise TypeError("tuple function arguments are not supported") varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] @@ -124,6 +102,7 @@ def getargspec(func): 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'defaults' is an n-tuple of the default values of the last n arguments. + """ if ismethod(func): @@ -139,7 +118,9 @@ def getargvalues(frame): A tuple of four things is returned: (args, varargs, varkw, locals). 'args' is a list of the argument names (it may contain nested lists). 
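
The stripped-down `getargs` above keeps only the code-object bookkeeping and now raises `TypeError` for Python 2 anonymous tuple arguments instead of decoding bytecode with `dis`. A minimal sketch of the retained path (the helper name here is hypothetical, and keyword-only arguments are ignored), assuming CPython's standard code-object layout; the `getargvalues` docstring continues below:

    # Sketch of the retained getargs() logic: positional names come
    # straight from the code object, no bytecode decoding required.
    import types

    CO_VARARGS = 0x04        # CPython flag values the compat module relies on
    CO_VARKEYWORDS = 0x08

    def getargs_sketch(co):
        if not isinstance(co, types.CodeType):
            raise TypeError('arg is not a code object')
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        if any(name[:1] in ('', '.') for name in args):
            raise TypeError("tuple function arguments are not supported")
        varargs = names[nargs] if co.co_flags & CO_VARARGS else None
        varkw_at = nargs + (varargs is not None)
        varkw = names[varkw_at] if co.co_flags & CO_VARKEYWORDS else None
        return args, varargs, varkw

    def demo(x, y, *rest, **opts):
        pass

    print(getargs_sketch(demo.__code__))   # (['x', 'y'], 'rest', 'opts')

Rejecting tuple arguments outright also avoids importing `dis` at module load, which is the motivation stated in the comment above.
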
'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame.""" + 'locals' is the locals dictionary of the given frame. + + """ args, varargs, varkw = getargs(frame.f_code) return args, varargs, varkw, frame.f_locals @@ -150,7 +131,9 @@ def joinseq(seq): return '(' + ', '.join(seq) + ')' def strseq(object, convert, join=joinseq): - """Recursively walk a sequence, stringifying each element.""" + """Recursively walk a sequence, stringifying each element. + + """ if type(object) in [list, tuple]: return join([strseq(_o, convert, join) for _o in object]) else: @@ -167,7 +150,9 @@ def formatargspec(args, varargs=None, varkw=None, defaults=None, The first four arguments are (args, varargs, varkw, defaults). The other four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments.""" + argument is an optional function to format the sequence of arguments. + + """ specs = [] if defaults: firstdefault = len(args) - len(defaults) @@ -193,7 +178,9 @@ def formatargvalues(args, varargs, varkw, locals, The first four arguments are (args, varargs, varkw, locals). The next four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments.""" + argument is an optional function to format the sequence of arguments. + + """ def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue): return formatarg(name) + formatvalue(locals[name]) @@ -204,18 +191,4 @@ def formatargvalues(args, varargs, varkw, locals, specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) if varkw: specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + string.join(specs, ', ') + ')' - -if __name__ == '__main__': - import inspect - def foo(x, y, z=None): - return None - - print(inspect.getargs(foo.__code__)) - print(getargs(foo.__code__)) - - print(inspect.getargspec(foo)) - print(getargspec(foo)) - - print(inspect.formatargspec(*inspect.getargspec(foo))) - print(formatargspec(*getargspec(foo))) + return '(' + ', '.join(specs) + ')' diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py index f5ac3f9f8..d95a362ca 100644 --- a/numpy/compat/py3k.py +++ b/numpy/compat/py3k.py @@ -36,7 +36,7 @@ if sys.version_info[0] >= 3: return str(s) def isfileobj(f): - return isinstance(f, (io.FileIO, io.BufferedReader)) + return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) def open_latin1(filename, mode='r'): return open(filename, mode=mode, encoding='iso-8859-1') @@ -57,7 +57,6 @@ else: asstr = str strchar = 'S' - def isfileobj(f): return isinstance(f, file) diff --git a/numpy/compat/setup.py b/numpy/compat/setup.py index c163bcaf9..26161f330 100644 --- a/numpy/compat/setup.py +++ b/numpy/compat/setup.py @@ -8,5 +8,5 @@ def configuration(parent_package='',top_path=None): return config if __name__ == '__main__': - from numpy.distutils.core import setup + from numpy.distutils.core import setup setup(configuration=configuration) diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py new file mode 100644 index 000000000..9822ab374 --- /dev/null +++ b/numpy/compat/tests/test_compat.py @@ -0,0 +1,23 @@ +from os.path import join + +from numpy.compat import isfileobj +from numpy.testing import assert_, run_module_suite +from 
numpy.testing.utils import tempdir + + +def test_isfileobj(): + with tempdir(prefix="numpy_test_compat_") as folder: + filename = join(folder, 'a.bin') + + with open(filename, 'wb') as f: + assert_(isfileobj(f)) + + with open(filename, 'ab') as f: + assert_(isfileobj(f)) + + with open(filename, 'rb') as f: + assert_(isfileobj(f)) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index 79bc72a8c..371d34b58 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -3,7 +3,20 @@ from __future__ import division, absolute_import, print_function from .info import __doc__ from numpy.version import version as __version__ +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +import os +envbak = os.environ.copy() +if 'OPENBLAS_MAIN_FREE' not in os.environ: + os.environ['OPENBLAS_MAIN_FREE'] = '1' +if 'GOTOBLAS_MAIN_FREE' not in os.environ: + os.environ['GOTOBLAS_MAIN_FREE'] = '1' from . import multiarray +os.environ.clear() +os.environ.update(envbak) +del envbak +del os + from . import umath from . import _internal # for freeze programs from . import numerictypes as nt @@ -52,7 +65,11 @@ bench = Tester().bench # The name numpy.core._ufunc_reconstruct must be # available for unpickling to work. def _ufunc_reconstruct(module, name): - mod = __import__(module) + # The `fromlist` kwarg is required to ensure that `mod` points to the + # inner-most module rather than the parent package when module name is + # nested. This makes it possible to pickle non-toplevel ufuncs such as + # scipy.special.expit for instance. + mod = __import__(module, fromlist=[name]) return getattr(mod, name) def _ufunc_reduce(func): diff --git a/numpy/core/bento.info b/numpy/core/bento.info index 299bd8ca5..0a22dc710 100644 --- a/numpy/core/bento.info +++ b/numpy/core/bento.info @@ -33,9 +33,6 @@ Library: Sources: src/private/scalarmathmodule.h.src, src/scalarmathmodule.c.src - Extension: _dotblas - Sources: - blasdot/_dotblas.c Extension: test_rational Sources: src/umath/test_rational.c.src diff --git a/numpy/core/bscript b/numpy/core/bscript index 5230aa428..61d0b93b8 100644 --- a/numpy/core/bscript +++ b/numpy/core/bscript @@ -28,7 +28,7 @@ from setup_common \ MANDATORY_FUNCS, C_ABI_VERSION, C_API_VERSION ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0") -NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0") +NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0") NUMPYCONFIG_SYM = [] @@ -60,27 +60,13 @@ def define_no_smp(): #-------------------------------- # Checking SMP and thread options #-------------------------------- - # Python 2.3 causes a segfault when - # trying to re-acquire the thread-state - # which is done in error-handling - # ufunc code. NPY_ALLOW_C_API and friends - # cause the segfault. So, we disable threading - # for now. - if sys.version[:5] < '2.4.2': - nosmp = 1 - else: - # Perhaps a fancier check is in order here. - # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. - try: - nosmp = os.environ['NPY_NOSMP'] - nosmp = 1 - except KeyError: - nosmp = 0 - return nosmp == 1 + # Perhaps a fancier check is in order here. + # so that threads are only enabled if there + # are actually multiple CPUS? 
-- but + # threaded code can be nice even on a single + # CPU so that long-calculating code doesn't + # block. + return 'NPY_NOSMP' in os.environ def write_numpy_config(conf): subst_dict = {} @@ -440,10 +426,12 @@ def pre_build(context): "src/multiarray/arraytypes.c.src", "src/multiarray/nditer_templ.c.src", "src/multiarray/lowlevel_strided_loops.c.src", + "src/multiarray/templ_common.h.src", "src/multiarray/einsum.c.src"] bld(target="multiarray_templates", source=multiarray_templates) if ENABLE_SEPARATE_COMPILATION: - sources = [pjoin('src', 'multiarray', 'arrayobject.c'), + sources = [ + pjoin('src', 'multiarray', 'arrayobject.c'), pjoin('src', 'multiarray', 'alloc.c'), pjoin('src', 'multiarray', 'arraytypes.c.src'), pjoin('src', 'multiarray', 'array_assign.c'), @@ -452,6 +440,7 @@ def pre_build(context): pjoin('src', 'multiarray', 'buffer.c'), pjoin('src', 'multiarray', 'calculation.c'), pjoin('src', 'multiarray', 'common.c'), + pjoin('src', 'multiarray', 'templ_common.h.src'), pjoin('src', 'multiarray', 'conversion_utils.c'), pjoin('src', 'multiarray', 'convert.c'), pjoin('src', 'multiarray', 'convert_datatype.c'), @@ -485,17 +474,30 @@ def pre_build(context): pjoin('src', 'multiarray', 'sequence.c'), pjoin('src', 'multiarray', 'shape.c'), pjoin('src', 'multiarray', 'ucsnarrow.c'), - pjoin('src', 'multiarray', 'usertypes.c')] + pjoin('src', 'multiarray', 'usertypes.c'), + pjoin('src', 'multiarray', 'vdot.c'), + ] + + if bld.env.HAS_CBLAS: + sources.append(pjoin('src', 'multiarray', 'cblasfuncs.c')) else: sources = extension.sources + + use = 'npysort npymath' + defines = ['_FILE_OFFSET_BITS=64', + '_LARGEFILE_SOURCE=1', + '_LARGEFILE64_SOURCE=1'] + + if bld.env.HAS_CBLAS: + use += ' CBLAS' + defines.append('HAVE_CBLAS') + includes = ["src/multiarray", "src/private"] return context.default_builder(extension, includes=includes, source=sources, - use="npysort npymath", - defines=['_FILE_OFFSET_BITS=64', - '_LARGEFILE_SOURCE=1', - '_LARGEFILE64_SOURCE=1'] + use=use, + defines=defines ) context.register_builder("multiarray", builder_multiarray) @@ -537,9 +539,3 @@ def pre_build(context): context.tweak_extension("scalarmath", use="npymath", includes=["src/private"]) context.tweak_extension("multiarray_tests", use="npymath", includes=["src/private"]) context.tweak_extension("umath_tests", use="npymath", includes=["src/private"]) - - def build_dotblas(extension): - if bld.env.HAS_CBLAS: - return context.default_builder(extension, use="CBLAS", - includes=["src/multiarray", "src/private"]) - context.register_builder("_dotblas", build_dotblas) diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index d62115224..acfced812 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -28,4 +28,4 @@ # Version 9 (NumPy 1.9) Added function annotations. # The interface has not changed, but the hash is different due to # the annotations, so keep the previous version number. 
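
The `fullapi_hash` change just below folds every element of an API entry's data tuple into the checksum rather than only its index, which is why the version 9 hash changes here even though the interface itself did not. A rough, self-contained illustration (the entry is made up, and `sorted(d.items())` is a simplified stand-in for `order_dict`):

    # Simplified stand-in for genapi.fullapi_hash: the old variant hashed
    # only the index, the new one joins the whole data tuple.
    from hashlib import md5

    api = {'PyArray_CheckAnyScalarExact': (300, 'NonNull(1)')}  # made up

    def api_hash(d, full_tuple):
        a = []
        for name, data in sorted(d.items()):        # stand-in for order_dict
            a.extend(name)
            if full_tuple:
                a.extend(','.join(map(str, data)))  # new: index + annotations
            else:
                a.extend(str(data[0]))              # old: index only
        return md5(''.join(a).encode('ascii')).hexdigest()

    # Same API dict, different checksum once annotations participate:
    print(api_hash(api, False) != api_hash(api, True))   # True
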
-0x00000009 = 49b27dc2dc7206a775a7376fdbc3b80c +0x00000009 = 982c4ebb6e7e4c194bf46b1535b4ef1b diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 5ab60a37c..84bd042f5 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -473,9 +473,9 @@ def fullapi_hash(api_dicts): of the list of items in the API (as a string).""" a = [] for d in api_dicts: - for name, index in order_dict(d): + for name, data in order_dict(d): a.extend(name) - a.extend(str(index)) + a.extend(','.join(map(str, data))) return md5new(''.join(a).encode('ascii')).hexdigest() diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index a590cfb48..415cbf7fc 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -8,8 +8,9 @@ from genapi import \ import numpy_api +# use annotated api when running under cpychecker h_template = r""" -#ifdef _MULTIARRAYMODULE +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) typedef struct { PyObject_HEAD diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index e3c9cf28b..9dbeb76cd 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -20,6 +20,12 @@ ReorderableNone = "PyUFunc_ReorderableNone" class FullTypeDescr(object): pass +class FuncNameSuffix(object): + """Stores the suffix to append when generating functions names. + """ + def __init__(self, suffix): + self.suffix = suffix + class TypeDescription(object): """Type signature for a ufunc. @@ -691,6 +697,13 @@ defdict = { TD(inexact, f='sqrt', astype={'e':'f'}), TD(P, f='sqrt'), ), +'cbrt': + Ufunc(1, 1, None, + docstrings.get('numpy.core.umath.cbrt'), + None, + TD(flts, f='cbrt', astype={'e':'f'}), + TD(P, f='cbrt'), + ), 'ceil': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.ceil'), @@ -795,6 +808,30 @@ defdict = { None, TD(flts), ), +'ldexp' : + Ufunc(2, 1, None, + docstrings.get('numpy.core.umath.ldexp'), + None, + [TypeDescription('e', None, 'ei', 'e'), + TypeDescription('f', None, 'fi', 'f'), + TypeDescription('e', FuncNameSuffix('long'), 'el', 'e'), + TypeDescription('f', FuncNameSuffix('long'), 'fl', 'f'), + TypeDescription('d', None, 'di', 'd'), + TypeDescription('d', FuncNameSuffix('long'), 'dl', 'd'), + TypeDescription('g', None, 'gi', 'g'), + TypeDescription('g', FuncNameSuffix('long'), 'gl', 'g'), + ], + ), +'frexp' : + Ufunc(1, 2, None, + docstrings.get('numpy.core.umath.frexp'), + None, + [TypeDescription('e', None, 'e', 'ei'), + TypeDescription('f', None, 'f', 'fi'), + TypeDescription('d', None, 'd', 'di'), + TypeDescription('g', None, 'g', 'gi'), + ], + ) } if sys.version_info[0] >= 3: @@ -854,7 +891,7 @@ def make_arrays(funcdict): thedict = chartotype1 # one input and one output for t in uf.type_descriptions: - if t.func_data not in (None, FullTypeDescr): + if t.func_data not in (None, FullTypeDescr) and not isinstance(t.func_data, FuncNameSuffix): funclist.append('NULL') astype = '' if not t.astype is None: @@ -880,6 +917,10 @@ def make_arrays(funcdict): tname = english_upper(chartoname[t.type]) datalist.append('(void *)NULL') funclist.append('%s_%s_%s_%s' % (tname, t.in_, t.out, name)) + elif isinstance(t.func_data, FuncNameSuffix): + datalist.append('(void *)NULL') + tname = english_upper(chartoname[t.type]) + funclist.append('%s_%s_%s' % (tname, name, 
t.func_data.suffix))
         else:
             datalist.append('(void *)NULL')
             tname = english_upper(chartoname[t.type])
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index cd6d2f176..972966627 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -341,6 +341,9 @@ multiarray_funcs_api = {
     'PyArray_SelectkindConverter':          (298,),
     'PyDataMem_NEW_ZEROED':                 (299,),
     # End 1.8 API
+    # End 1.9 API
+    'PyArray_CheckAnyScalarExact':          (300, NonNull(1)),
+    # End 1.10 API
 }
 
 ufunc_types_api = {
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 4d302969e..67c01cc67 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -3104,6 +3104,35 @@ add_newdoc('numpy.core.umath', 'sqrt',
 
     """)
 
+add_newdoc('numpy.core.umath', 'cbrt',
+    """
+    Return the cube-root of an array, element-wise.
+
+    .. versionadded:: 1.10.0
+
+    Parameters
+    ----------
+    x : array_like
+        The values whose cube-roots are required.
+    out : ndarray, optional
+        Alternate array object in which to put the result; if provided, it
+        must have the same shape as `x`.
+
+    Returns
+    -------
+    y : ndarray
+        An array of the same shape as `x`, containing the cube-root of
+        each element in `x`.
+        If `out` was provided, `y` is a reference to it.
+
+
+    Examples
+    --------
+    >>> np.cbrt([1,8,27])
+    array([ 1.,  2.,  3.])
+
+    """)
+
 add_newdoc('numpy.core.umath', 'square',
     """
     Return the element-wise square of the input.
@@ -3324,9 +3353,6 @@ add_newdoc('numpy.core.umath', 'true_divide',
 
     """)
 
-# This doc is not currently used, but has been converted to a C string
-# that can be found in numpy/core/src/umath/umathmodule.c where the
-# frexp ufunc is constructed.
 add_newdoc('numpy.core.umath', 'frexp',
     """
     Decompose the elements of x into mantissa and twos exponent.
@@ -3372,9 +3398,6 @@ add_newdoc('numpy.core.umath', 'frexp',
 
     """)
 
-# This doc is not currently used, but has been converted to a C string
-# that can be found in numpy/core/src/umath/umathmodule.c where the
-# ldexp ufunc is constructed.
 add_newdoc('numpy.core.umath', 'ldexp',
     """
     Returns x1 * 2**x2, element-wise.
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 49fd57e29..674cd5f59 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -880,7 +880,7 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
 
 def argmax(a, axis=None):
     """
-    Indices of the maximum values along an axis.
+    Returns the indices of the maximum values along an axis.
 
     Parameters
     ----------
@@ -937,12 +937,52 @@ def argmax(a, axis=None):
 
 def argmin(a, axis=None):
     """
-    Return the indices of the minimum values along an axis.
+    Returns the indices of the minimum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a`,
+        with the dimension along `axis` removed.
 
     See Also
    --------
-    argmax : Similar function. Please refer to `numpy.argmax` for detailed
-        documentation.
+    ndarray.argmin, argmax
+    amin : The minimum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+
+    Notes
+    -----
+    In case of multiple occurrences of the minimum values, the indices
+    corresponding to the first occurrence are returned.
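
The Notes entry above pairs naturally with `unravel_index` from the See Also list; a small usage sketch before the Examples that follow:

    import numpy as np

    a = np.array([[3, 1, 2],
                  [1, 0, 5]])

    flat = np.argmin(a)                     # index into the flattened array
    print(flat)                             # 4
    print(np.unravel_index(flat, a.shape))  # (1, 1): the 2-D position of the 0
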
+ + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argmin(a) + 0 + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + >>> b = np.arange(6) + >>> b[4] = 0 + >>> b + array([0, 1, 2, 3, 0, 5]) + >>> np.argmin(b) # Only the first occurrence is returned. + 0 """ try: @@ -2055,8 +2095,14 @@ def amax(a, axis=None, out=None, keepdims=False): ---------- a : array_like Input data. - axis : int, optional - Axis along which to operate. By default, flattened input is used. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded: 1.7.0 + + If this is a tuple of ints, the maximum is selected over multiple axes, + instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. @@ -2139,8 +2185,14 @@ def amin(a, axis=None, out=None, keepdims=False): ---------- a : array_like Input data. - axis : int, optional - Axis along which to operate. By default, flattened input is used. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded: 1.7.0 + + If this is a tuple of ints, the minimum is selected over multiple axes, + instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. @@ -2653,9 +2705,14 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): a : array_like Array containing numbers whose mean is desired. If `a` is not an array, a conversion is attempted. - axis : int, optional - Axis along which the means are computed. The default is to compute - the mean of the flattened array. + axis : None or int or tuple of ints, optional + Axis or axes along which the means are computed. The default is to + compute the mean of the flattened array. + + .. versionadded: 1.7.0 + + If this is a tuple of ints, a mean is performed over multiple axes, + instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is `float64`; for floating point inputs, it is the same as the @@ -2738,9 +2795,14 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): ---------- a : array_like Calculate the standard deviation of these values. - axis : int, optional - Axis along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. + axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + + .. versionadded: 1.7.0 + + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. dtype : dtype, optional Type to use in computing the standard deviation. For arrays of integer type the default is float64, for arrays of float types it is @@ -2841,9 +2903,14 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, a : array_like Array containing numbers whose variance is desired. If `a` is not an array, a conversion is attempted. - axis : int, optional - Axis along which the variance is computed. 
The default is to compute - the variance of the flattened array. + axis : None or int or tuple of ints, optional + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + .. versionadded: 1.7.0 + + If this is a tuple of ints, a variance is performed over multiple axes, + instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type the default is `float32`; for arrays of float types it is the same as diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 400e75eb5..0bf93390e 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -36,6 +36,8 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None): The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. + .. versionadded:: 1.9.0 + Returns ------- samples : ndarray diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 21ff8cd1a..78f79d5fe 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -203,12 +203,6 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, } NPY_CASTING; typedef enum { diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index fec95779a..8a9109c5c 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -298,7 +298,7 @@ npy_PyFile_DupClose(PyObject *file, FILE* handle) #else /* DEPRECATED, DO NOT USE */ -#define npy_PyFile_DupClose(f, h, p) npy_PyFile_DupClose2((f), (h), (p)) +#define npy_PyFile_DupClose(f, h) npy_PyFile_DupClose2((f), (h), 0) /* use these */ static NPY_INLINE FILE * diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h index 24d4ce1fc..60abae4e0 100644 --- a/numpy/core/include/numpy/npy_cpu.h +++ b/numpy/core/include/numpy/npy_cpu.h @@ -20,6 +20,7 @@ #define _NPY_CPUARCH_H_ #include "numpyconfig.h" +#include <string.h> /* for memcpy */ #if defined( __i386__ ) || defined(i386) || defined(_M_IX86) /* @@ -80,38 +81,7 @@ information about your platform (OS, CPU and compiler) #endif -/* - This "white-lists" the architectures that we know don't require - pointer alignment. We white-list, since the memcpy version will - work everywhere, whereas assignment will only work where pointer - dereferencing doesn't require alignment. - - TODO: There may be more architectures we can white list. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src))) -#else - #if NPY_SIZEOF_PY_INTPTR_T == 4 - #define NPY_COPY_PYOBJECT_PTR(dst, src) \ - ((char*)(dst))[0] = ((char*)(src))[0]; \ - ((char*)(dst))[1] = ((char*)(src))[1]; \ - ((char*)(dst))[2] = ((char*)(src))[2]; \ - ((char*)(dst))[3] = ((char*)(src))[3]; - #elif NPY_SIZEOF_PY_INTPTR_T == 8 - #define NPY_COPY_PYOBJECT_PTR(dst, src) \ - ((char*)(dst))[0] = ((char*)(src))[0]; \ - ((char*)(dst))[1] = ((char*)(src))[1]; \ - ((char*)(dst))[2] = ((char*)(src))[2]; \ - ((char*)(dst))[3] = ((char*)(src))[3]; \ - ((char*)(dst))[4] = ((char*)(src))[4]; \ - ((char*)(dst))[5] = ((char*)(src))[5]; \ - ((char*)(dst))[6] = ((char*)(src))[6]; \ - ((char*)(dst))[7] = ((char*)(src))[7]; - #else - #error Unknown architecture, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) - #endif -#endif +#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *)) #if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) #define NPY_CPU_HAVE_UNALIGNED_ACCESS 1 diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h index 3ba03d0e3..a8ec57245 100644 --- a/numpy/core/include/numpy/npy_endian.h +++ b/numpy/core/include/numpy/npy_endian.h @@ -10,10 +10,22 @@ /* Use endian.h if available */ #include <endian.h> - #define NPY_BYTE_ORDER __BYTE_ORDER - #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN - #define NPY_BIG_ENDIAN __BIG_ENDIAN -#else + #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) + #define NPY_BYTE_ORDER BYTE_ORDER + #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN + #define NPY_BIG_ENDIAN BIG_ENDIAN + #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) + #define NPY_BYTE_ORDER _BYTE_ORDER + #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN + #define NPY_BIG_ENDIAN _BIG_ENDIAN + #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) + #define NPY_BYTE_ORDER __BYTE_ORDER + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN + #endif +#endif + +#ifndef NPY_BYTE_ORDER /* Set endianness info using target CPU */ #include "npy_cpu.h" diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index b7920460d..0137e0556 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -118,15 +118,12 @@ double npy_tanh(double x); double npy_asin(double x); double npy_acos(double x); double npy_atan(double x); -double npy_aexp(double x); -double npy_alog(double x); -double npy_asqrt(double x); -double npy_afabs(double x); double npy_log(double x); double npy_log10(double x); double npy_exp(double x); double npy_sqrt(double x); +double npy_cbrt(double x); double npy_fabs(double x); double npy_ceil(double x); @@ -147,6 +144,8 @@ double npy_log2(double x); double npy_atan2(double x, double y); double npy_pow(double x, double y); double npy_modf(double x, double* y); +double npy_frexp(double x, int* y); +double npy_ldexp(double n, int y); double npy_copysign(double x, double y); double npy_nextafter(double x, double y); @@ -231,6 +230,7 @@ float npy_ceilf(float x); float npy_rintf(float x); float npy_truncf(float x); float npy_sqrtf(float x); +float npy_cbrtf(float x); float npy_log10f(float x); float npy_logf(float x); float npy_expf(float x); @@ -251,6 +251,8 @@ float npy_powf(float x, float y); float npy_fmodf(float x, float y); float npy_modff(float 
x, float* y); +float npy_frexpf(float x, int* y); +float npy_ldexpf(float x, int y); float npy_copysignf(float x, float y); float npy_nextafterf(float x, float y); @@ -272,6 +274,7 @@ npy_longdouble npy_ceill(npy_longdouble x); npy_longdouble npy_rintl(npy_longdouble x); npy_longdouble npy_truncl(npy_longdouble x); npy_longdouble npy_sqrtl(npy_longdouble x); +npy_longdouble npy_cbrtl(npy_longdouble x); npy_longdouble npy_log10l(npy_longdouble x); npy_longdouble npy_logl(npy_longdouble x); npy_longdouble npy_expl(npy_longdouble x); @@ -292,6 +295,8 @@ npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); +npy_longdouble npy_frexpl(npy_longdouble x, int* y); +npy_longdouble npy_ldexpl(npy_longdouble x, int y); npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h index 38e3dcf0f..a24a0d837 100644 --- a/numpy/core/include/numpy/ufuncobject.h +++ b/numpy/core/include/numpy/ufuncobject.h @@ -152,13 +152,13 @@ typedef struct _tagPyUFuncObject { int check_return; /* The name of the ufunc */ - char *name; + const char *name; /* Array of type numbers, of size ('nargs' * 'ntypes') */ char *types; /* Documentation string */ - char *doc; + const char *doc; void *ptr; PyObject *obj; diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index b1c96ee29..4b10f361c 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -111,6 +111,11 @@ class memmap(ndarray): certain size depending on the platform. This size is always < 2GB even on 64-bit systems. + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. + Examples -------- >>> data = np.arange(12, dtype='float32') diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 57784a51f..ac51f5d01 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -4,11 +4,13 @@ import os import sys import warnings import collections -from . import multiarray +from numpy.core import multiarray from . import umath -from .umath import * +from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE, + ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, + ERR_DEFAULT, PINF, NAN) from . import numerictypes -from .numerictypes import * +from .numerictypes import longlong, intc, int_, float_, complex_, bool_ if sys.version_info[0] >= 3: import pickle @@ -358,9 +360,6 @@ def extend_all(module): if a not in adict: __all__.append(a) -extend_all(umath) -extend_all(numerictypes) - newaxis = None @@ -986,7 +985,7 @@ def convolve(a,v,mode='full'): array([ 2.5]) """ - a, v = array(a, ndmin=1), array(v, ndmin=1) + a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) if (len(v) > len(a)): a, v = v, a if len(a) == 0 : @@ -1078,31 +1077,61 @@ def outer(a, b, out=None): # try to import blas optimized dot if available envbak = os.environ.copy() -try: - # importing this changes the dot function for basic 4 types - # to blas-optimized versions. 
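
The `npy_frexp`/`npy_ldexp` declarations added above mirror the C99 pair at every precision; the invariant they implement is easiest to state with Python's `math` module as a stand-in:

    import math

    x = 1234.5678
    m, e = math.frexp(x)            # x == m * 2**e, with 0.5 <= |m| < 1
    assert 0.5 <= abs(m) < 1.0
    assert math.ldexp(m, e) == x    # ldexp exactly inverts frexp
    print(m, e)                     # mantissa in [0.5, 1), integer exponent
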
-
-    # disables openblas affinity setting of the main thread that limits
-    # python threads or processes to one core
-    if 'OPENBLAS_MAIN_FREE' not in os.environ:
-        os.environ['OPENBLAS_MAIN_FREE'] = '1'
-    if 'GOTOBLAS_MAIN_FREE' not in os.environ:
-        os.environ['GOTOBLAS_MAIN_FREE'] = '1'
-    from ._dotblas import dot, vdot, inner, alterdot, restoredot
-except ImportError:
-    # docstrings are in add_newdocs.py
-    inner = multiarray.inner
-    dot = multiarray.dot
-    def vdot(a, b):
-        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
-    def alterdot():
-        pass
-    def restoredot():
-        pass
-finally:
-    os.environ.clear()
-    os.environ.update(envbak)
-    del envbak
+dot = multiarray.dot
+inner = multiarray.inner
+vdot = multiarray.vdot
+
+def alterdot():
+    """
+    Change `dot`, `vdot`, and `inner` to use accelerated BLAS functions.
+
+    Typically, as a user of Numpy, you do not explicitly call this
+    function. If Numpy is built with an accelerated BLAS, this function is
+    automatically called when Numpy is imported.
+
+    When Numpy is built with an accelerated BLAS like ATLAS, these
+    functions are replaced to make use of the faster implementations. The
+    faster implementations only affect float32, float64, complex64, and
+    complex128 arrays. Furthermore, the BLAS API only includes
+    matrix-matrix, matrix-vector, and vector-vector products. Products of
+    arrays with larger dimensionalities use the built-in functions and are
+    not accelerated.
+
+    .. note:: Deprecated in Numpy 1.10
+              The cblas functions have been integrated into the multiarray
+              module and alterdot no longer does anything. It will be
+              removed in Numpy 1.11.0.
+
+    See Also
+    --------
+    restoredot : `restoredot` undoes the effects of `alterdot`.
+
+    """
+    warnings.warn("alterdot no longer does anything.", DeprecationWarning)
+
+
+def restoredot():
+    """
+    Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS
+    implementations.
+
+    Typically, the user will only need to call this when troubleshooting
+    an installation problem, reproducing the conditions of a build without
+    an accelerated BLAS, or when being very careful about benchmarking
+    linear algebra operations.
+
+    .. note:: Deprecated in Numpy 1.10
+              The cblas functions have been integrated into the multiarray
+              module and restoredot no longer does anything. It will be
+              removed in Numpy 1.11.0.
+
+    See Also
+    --------
+    alterdot : `restoredot` undoes the effects of `alterdot`.
+
+    """
+    warnings.warn("restoredot no longer does anything.", DeprecationWarning)
+
 
 def tensordot(a, b, axes=2):
     """
@@ -2154,6 +2183,41 @@ def identity(n, dtype=None):
     from numpy import eye
     return eye(n, dtype=dtype)
 
+def _allclose_points(a, b, rtol=1.e-5, atol=1.e-8):
+    """
+    This is the point-wise inner calculation of 'allclose', which is subtly
+    different from 'isclose'. Provided as a comparison routine for use in
+    testing.assert_allclose.
+    See those routines for further details.
+
+    """
+    x = array(a, copy=False, ndmin=1)
+    y = array(b, copy=False, ndmin=1)
+
+    # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+    # casting of x later.
+    dtype = multiarray.result_type(y, 1.)
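
With `_allclose_points` factored out here (its body continues below), `allclose` becomes a plain reduction over the same pointwise test that `isclose` exposes. A quick comparison, assuming a NumPy recent enough to provide `isclose` (1.7+):

    import numpy as np

    a = np.array([1.0, 1.0001, np.inf])
    b = np.array([1.0, 1.0,    np.inf])

    # allclose() collapses the pointwise comparison to a single bool ...
    print(np.allclose(a, b, rtol=1e-5, atol=1e-8))  # False: 1.0001 vs 1.0

    # ... while the pointwise result, as exposed by isclose(), shows which
    # element failed; matching infinities compare as close.
    print(np.isclose(a, b, rtol=1e-5, atol=1e-8))   # [ True False  True]
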
+ y = array(y, dtype=dtype, copy=False) + + xinf = isinf(x) + yinf = isinf(y) + if any(xinf) or any(yinf): + # Check that x and y have inf's only in the same positions + if not all(xinf == yinf): + return False + # Check that sign of inf's in x and y is the same + if not all(x[xinf] == y[xinf]): + return False + + x = x[~xinf] + y = y[~xinf] + + # ignore invalid fpe's + with errstate(invalid='ignore'): + r = less_equal(abs(x - y), atol + rtol * abs(y)) + + return r + def allclose(a, b, rtol=1.e-5, atol=1.e-8): """ Returns True if two arrays are element-wise equal within a tolerance. @@ -2209,32 +2273,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8): False """ - x = array(a, copy=False, ndmin=1) - y = array(b, copy=False, ndmin=1) - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = multiarray.result_type(y, 1.) - y = array(y, dtype=dtype, copy=False) - - xinf = isinf(x) - yinf = isinf(y) - if any(xinf) or any(yinf): - # Check that x and y have inf's only in the same positions - if not all(xinf == yinf): - return False - # Check that sign of inf's in x and y is the same - if not all(x[xinf] == y[xinf]): - return False - - x = x[~xinf] - y = y[~xinf] - - # ignore invalid fpe's - with errstate(invalid='ignore'): - r = all(less_equal(abs(x - y), atol + rtol * abs(y))) - - return r + return all(_allclose_points(a, b, rtol=rtol, atol=atol)) def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ @@ -2834,6 +2873,10 @@ nan = NaN = NAN False_ = bool_(False) True_ = bool_(True) +from .umath import * +from .numerictypes import * from . import fromnumeric from .fromnumeric import * extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 1545bc734..0c03cce89 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -670,7 +670,7 @@ def issubclass_(arg1, arg2): Determine if a class is a subclass of a second class. `issubclass_` is equivalent to the Python built-in ``issubclass``, - except that it returns False instead of raising a TypeError is one + except that it returns False instead of raising a TypeError if one of the arguments is not a class. Parameters diff --git a/numpy/core/records.py b/numpy/core/records.py index d0f82a25c..bf4d835ea 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -71,7 +71,6 @@ _byteorderconv = {'b':'>', # are equally allowed numfmt = nt.typeDict -_typestr = nt._typestr def find_duplicate(list): """Find duplication in a list, return a list of duplicated elements""" @@ -268,7 +267,7 @@ class record(nt.void): """Pretty-print all fields.""" # pretty-print all fields names = self.dtype.names - maxlen = max([len(name) for name in names]) + maxlen = max(len(name) for name in names) rows = [] fmt = '%% %ds: %%s' % maxlen for name in names: @@ -527,15 +526,12 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, if formats is None and dtype is None: # go through each object in the list to see if it is an ndarray # and determine the formats. 
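
The rewrite that follows builds `formats` as a list of each array's `dtype.str` and comma-joins it, instead of hand-assembling type codes from the removed `_typestr` table. A hedged sketch of what this produces (exact byte-order and string type characters depend on the platform and Python version):

    import numpy as np

    arrays = [np.arange(3, dtype=np.int32),
              np.array(['ab', 'cd', 'ef'])]

    # Each ndarray contributes its dtype.str directly; flexible itemsizes
    # (the strings here) are encoded for free.
    formats = ','.join(arr.dtype.str for arr in arrays)
    print(formats)                  # e.g. '<i4,<U2'

    rec = np.core.records.fromarrays(arrays, formats=formats)
    print(rec[0])                   # (0, 'ab')
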
- formats = '' + formats = [] for obj in arrayList: if not isinstance(obj, ndarray): raise ValueError("item in the array list must be an ndarray.") - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, nt.flexible): - formats += repr(obj.itemsize) - formats += ',' - formats = formats[:-1] + formats.append(obj.dtype.str) + formats = ','.join(formats) if dtype is not None: descr = sb.dtype(dtype) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 5da042413..007a381ae 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -19,7 +19,7 @@ from setup_common import * ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0") # Set to True to enable relaxed strides checking. This (mostly) means # that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. -NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0") +NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0") # XXX: ugly, we use a class to avoid calling twice some expensive functions in # config.h/numpyconfig.h. I don't see a better way because distutils force @@ -78,27 +78,13 @@ def is_npy_no_signal(): def is_npy_no_smp(): """Return True if the NPY_NO_SMP symbol must be defined in public header (when SMP support cannot be reliably enabled).""" - # Python 2.3 causes a segfault when - # trying to re-acquire the thread-state - # which is done in error-handling - # ufunc code. NPY_ALLOW_C_API and friends - # cause the segfault. So, we disable threading - # for now. - if sys.version[:5] < '2.4.2': - nosmp = 1 - else: - # Perhaps a fancier check is in order here. - # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. - try: - nosmp = os.environ['NPY_NOSMP'] - nosmp = 1 - except KeyError: - nosmp = 0 - return nosmp == 1 + # Perhaps a fancier check is in order here. + # so that threads are only enabled if there + # are actually multiple CPUS? -- but + # threaded code can be nice even on a single + # CPU so that long-calculating code doesn't + # block. 
+ return 'NPY_NOSMP' in os.environ def win32_checks(deflist): from numpy.distutils.misc_util import get_build_architecture @@ -176,12 +162,11 @@ def check_math_capabilities(config, moredefs, mathlibs): moredefs.append((fname2def(f), 1)) for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: - if config.check_func(fn, decl='int %s %s(void *);' % (dec, fn), - call=False): + if config.check_gcc_function_attribute(dec, fn): moredefs.append((fname2def(fn), 1)) for fn in OPTIONAL_VARIABLE_ATTRIBUTES: - if config.check_func(fn, decl='int %s a;' % (fn), call=False): + if config.check_gcc_variable_attribute(fn): m = fn.replace("(", "_").replace(")", "_") moredefs.append((fname2def(m), 1)) @@ -280,11 +265,11 @@ def check_types(config_cmd, ext, build_dir): expected['long'] = [8, 4] expected['float'] = [4] expected['double'] = [8] - expected['long double'] = [8, 12, 16] - expected['Py_intptr_t'] = [4, 8] + expected['long double'] = [16, 12, 8] + expected['Py_intptr_t'] = [8, 4] expected['PY_LONG_LONG'] = [8] expected['long long'] = [8] - expected['off_t'] = [4, 8] + expected['off_t'] = [8, 4] # Check we have the python header (-dev* packages on Linux) result = config_cmd.check_header('Python.h') @@ -324,7 +309,8 @@ def check_types(config_cmd, ext, build_dir): # definition is binary compatible with C99 complex type (check done at # build time in npy_common.h) complex_def = "struct {%s __x; %s __y;}" % (type, type) - res = config_cmd.check_type_size(complex_def, expected=2*expected[type]) + res = config_cmd.check_type_size(complex_def, + expected=[2 * x for x in expected[type]]) if res >= 0: public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) else: @@ -447,6 +433,9 @@ def configuration(parent_package='',top_path=None): if sys.platform=='win32' or os.name=='nt': win32_checks(moredefs) + # C99 restrict keyword + moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict())) + # Inline check inline = config_cmd.check_inline() @@ -752,6 +741,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'buffer.h'), join('src', 'multiarray', 'calculation.h'), join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'templ_common.h.src'), join('src', 'multiarray', 'convert_datatype.h'), join('src', 'multiarray', 'convert.h'), join('src', 'multiarray', 'conversion_utils.h'), @@ -773,6 +763,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'shape.h'), join('src', 'multiarray', 'ucsnarrow.h'), join('src', 'multiarray', 'usertypes.h'), + join('src', 'multiarray', 'vdot.h'), join('src', 'private', 'lowlevel_strided_loops.h'), join('include', 'numpy', 'arrayobject.h'), join('include', 'numpy', '_neighborhood_iterator_imp.h'), @@ -826,6 +817,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'mapping.c'), join('src', 'multiarray', 'methods.c'), join('src', 'multiarray', 'multiarraymodule.c'), + join('src', 'multiarray', 'templ_common.h.src'), join('src', 'multiarray', 'nditer_templ.c.src'), join('src', 'multiarray', 'nditer_api.c'), join('src', 'multiarray', 'nditer_constr.c'), @@ -839,23 +831,34 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'scalarapi.c'), join('src', 'multiarray', 'scalartypes.c.src'), join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'ucsnarrow.c')] + join('src', 'multiarray', 'ucsnarrow.c'), + join('src', 'multiarray', 'vdot.c'), + ] + blas_info = get_info('blas_opt', 0) + if blas_info and ('HAVE_CBLAS', None) in 
blas_info.get('define_macros', []): + extra_info = blas_info + multiarray_src.append(join('src', 'multiarray', 'cblasfuncs.c')) + else: + extra_info = {} if not ENABLE_SEPARATE_COMPILATION: multiarray_deps.extend(multiarray_src) multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')] multiarray_src.append(generate_multiarray_templated_sources) + multiarray_src.append(join('src', 'multiarray', 'templ_common.h.src')) + config.add_extension('multiarray', - sources = multiarray_src + + sources=multiarray_src + [generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir, 'generate_numpy_api.py'), - join('*.py')], - depends = deps + multiarray_deps, - libraries = ['npymath', 'npysort']) + generate_numpyconfig_h, + generate_numpy_api, + join(codegen_dir, 'generate_numpy_api.py'), + join('*.py')], + depends=deps + multiarray_deps, + libraries=['npymath', 'npysort'], + extra_info=extra_info) ####################################################################### # umath module # @@ -943,28 +946,6 @@ def configuration(parent_package='',top_path=None): libraries = ['npymath'], ) - ####################################################################### - # _dotblas module # - ####################################################################### - - # Configure blasdot - blas_info = get_info('blas_opt', 0) - #blas_info = {} - def get_dotblas_sources(ext, build_dir): - if blas_info: - if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []): - return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient. - return ext.depends[:1] - return None # no extension module will be built - - config.add_extension('_dotblas', - sources = [get_dotblas_sources], - depends = [join('blasdot', '_dotblas.c'), - join('blasdot', 'cblas.h'), - ], - include_dirs = ['blasdot'], - extra_info = blas_info - ) ####################################################################### # umath_tests module # diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index be5673a47..e51797c03 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -100,7 +100,7 @@ MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", "copysign", "nextafter", "ftello", "fseeko", - "strtoll", "strtoull"] + "strtoll", "strtoull", "cbrt"] OPTIONAL_HEADERS = [ @@ -145,7 +145,7 @@ C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', - "exp2", "log2", "copysign", "nextafter"] + "exp2", "log2", "copysign", "nextafter", "cbrt"] C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] @@ -172,10 +172,20 @@ def check_long_double_representation(cmd): body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} # We need to use _compile because we need the object filename - src, object = cmd._compile(body, None, None, 'c') + src, obj = cmd._compile(body, None, None, 'c') try: - type = long_double_representation(pyod(object)) - return type + ltype = long_double_representation(pyod(obj)) + return ltype + except ValueError: + # try linking to support CC="gcc -flto" or icc -ipo + # struct needs to be volatile so it isn't optimized away + body = body.replace('struct', 'volatile 
struct') + body += "int main(void) { return 0; }\n" + src, obj = cmd._compile(body, None, None, 'c') + cmd.temp_files.append("_configtest") + cmd.compiler.link_executable([obj], "_configtest") + ltype = long_double_representation(pyod("_configtest")) + return ltype finally: cmd._clean() diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c index 06c9ff296..77eb0416b 100644 --- a/numpy/core/src/multiarray/alloc.c +++ b/numpy/core/src/multiarray/alloc.c @@ -13,9 +13,14 @@ #define NBUCKETS 1024 /* number of buckets for data*/ #define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ -#define NCACHE 8 /* 1 + number of cache entries per bucket */ -static void * datacache[NBUCKETS][NCACHE]; -static void * dimcache[NBUCKETS_DIM][NCACHE]; +#define NCACHE 7 /* number of cache entries per bucket */ +/* this structure fits neatly into a cacheline */ +typedef struct { + npy_uintp available; /* number of cached pointers */ + void * ptrs[NCACHE]; +} cache_bucket; +static cache_bucket datacache[NBUCKETS]; +static cache_bucket dimcache[NBUCKETS_DIM]; /* * very simplistic small memory block cache to avoid more expensive libc @@ -25,15 +30,13 @@ static void * dimcache[NBUCKETS_DIM][NCACHE]; */ static NPY_INLINE void * _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, - void * (*cache)[NCACHE], void * (*alloc)(size_t)) + cache_bucket * cache, void * (*alloc)(size_t)) { assert((esz == 1 && cache == datacache) || (esz == sizeof(npy_intp) && cache == dimcache)); if (nelem < msz) { - /* first entry is used as fill counter */ - npy_uintp * idx = (npy_uintp *)&(cache[nelem][0]); - if (*idx > 0) { - return cache[nelem][(*idx)--]; + if (cache[nelem].available > 0) { + return cache[nelem].ptrs[--(cache[nelem].available)]; } } return alloc(nelem * esz); @@ -45,13 +48,11 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, */ static NPY_INLINE void _npy_free_cache(void * p, npy_uintp nelem, npy_uint msz, - void * (*cache)[NCACHE], void (*dealloc)(void *)) + cache_bucket * cache, void (*dealloc)(void *)) { if (p != NULL && nelem < msz) { - /* first entry is used as fill counter */ - npy_uintp * idx = (npy_uintp *)&(cache[nelem][0]); - if (*idx < NCACHE - 1) { - cache[nelem][++(*idx)] = p; + if (cache[nelem].available < NCACHE) { + cache[nelem].ptrs[cache[nelem].available++] = p; return; } } @@ -76,7 +77,9 @@ npy_alloc_cache_zero(npy_uintp sz) NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyDataMem_NEW); - memset(p, 0, sz); + if (p) { + memset(p, 0, sz); + } return p; } NPY_BEGIN_THREADS; diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index d92522a9f..6f9c89082 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -429,19 +429,46 @@ array_dealloc(PyArrayObject *self) Py_TYPE(self)->tp_free((PyObject *)self); } +/* + * Extend string. On failure, returns NULL and leaves *strp alone. + * XXX we do this in multiple places; time for a string library? 
+ */ +static char * +extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp) +{ + char *str = *strp; + Py_ssize_t new_cap; + + if (n >= *maxp - 16) { + new_cap = *maxp * 2; + + if (new_cap <= *maxp) { /* overflow */ + return NULL; + } + str = PyArray_realloc(*strp, new_cap); + if (str != NULL) { + *strp = str; + *maxp = new_cap; + } + } + return str; +} + static int -dump_data(char **string, int *n, int *max_n, char *data, int nd, +dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd, npy_intp *dimensions, npy_intp *strides, PyArrayObject* self) { PyArray_Descr *descr=PyArray_DESCR(self); - PyObject *op, *sp; + PyObject *op = NULL, *sp = NULL; char *ostring; - npy_intp i, N; + npy_intp i, N, ret = 0; -#define CHECK_MEMORY do { if (*n >= *max_n-16) { \ - *max_n *= 2; \ - *string = (char *)PyArray_realloc(*string, *max_n); \ - }} while (0) +#define CHECK_MEMORY do { \ + if (extend(string, *n, max_n) == NULL) { \ + ret = -1; \ + goto end; \ + } \ + } while (0) if (nd == 0) { if ((op = descr->f->getitem(data, self)) == NULL) { @@ -449,17 +476,14 @@ dump_data(char **string, int *n, int *max_n, char *data, int nd, } sp = PyObject_Repr(op); if (sp == NULL) { - Py_DECREF(op); - return -1; + ret = -1; + goto end; } ostring = PyString_AsString(sp); N = PyString_Size(sp)*sizeof(char); *n += N; CHECK_MEMORY; memmove(*string + (*n - N), ostring, N); - Py_DECREF(sp); - Py_DECREF(op); - return 0; } else { CHECK_MEMORY; @@ -482,10 +506,14 @@ dump_data(char **string, int *n, int *max_n, char *data, int nd, CHECK_MEMORY; (*string)[*n] = ']'; *n += 1; - return 0; } #undef CHECK_MEMORY + +end: + Py_XDECREF(op); + Py_XDECREF(sp); + return ret; } /*NUMPY_API @@ -555,21 +583,13 @@ array_repr_builtin(PyArrayObject *self, int repr) { PyObject *ret; char *string; - int n, max_n; - - max_n = PyArray_NBYTES(self)*4*sizeof(char) + 7; + /* max_n initial value is arbitrary, dump_data will extend it */ + Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7; if ((string = PyArray_malloc(max_n)) == NULL) { return PyErr_NoMemory(); } - if (repr) { - n = 6; - sprintf(string, "array("); - } - else { - n = 0; - } if (dump_data(&string, &n, &max_n, PyArray_DATA(self), PyArray_NDIM(self), PyArray_DIMS(self), PyArray_STRIDES(self), self) < 0) { @@ -579,14 +599,15 @@ array_repr_builtin(PyArrayObject *self, int repr) if (repr) { if (PyArray_ISEXTENDED(self)) { - char buf[100]; - PyOS_snprintf(buf, sizeof(buf), "%d", PyArray_DESCR(self)->elsize); - sprintf(string+n, ", '%c%s')", PyArray_DESCR(self)->type, buf); - ret = PyUString_FromStringAndSize(string, n + 6 + strlen(buf)); + ret = PyUString_FromFormat("array(%s, '%c%d')", + string, + PyArray_DESCR(self)->type, + PyArray_DESCR(self)->elsize); } else { - sprintf(string+n, ", '%c')", PyArray_DESCR(self)->type); - ret = PyUString_FromStringAndSize(string, n+6); + ret = PyUString_FromFormat("array(%s, '%c')", + string, + PyArray_DESCR(self)->type); } } else { diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 92752be92..89b5404b4 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -3,8 +3,11 @@ #include "Python.h" #include "structmember.h" + #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + +#include "numpy/npy_common.h" #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "npy_pycompat.h" @@ -19,8 +22,16 @@ #include "_datetime.h" #include "arrayobject.h" #include "alloc.h" +#ifdef NPY_HAVE_SSE2_INTRINSICS 
+#include <emmintrin.h> +#endif #include "numpyos.h" +#include <string.h> + +#include "cblasfuncs.h" +#include "npy_cblas.h" +#include <limits.h> /* @@ -492,7 +503,7 @@ STRING_setitem(PyObject *op, char *ov, PyArrayObject *ap) return -1; } #endif - if (PyBytes_AsStringAndSize(temp, &ptr, &len) == -1) { + if (PyBytes_AsStringAndSize(temp, &ptr, &len) < 0) { Py_DECREF(temp); return -1; } @@ -2627,12 +2638,14 @@ STRING_compare(char *ip1, char *ip2, PyArrayObject *ap) const unsigned char *c1 = (unsigned char *)ip1; const unsigned char *c2 = (unsigned char *)ip2; const size_t len = PyArray_DESCR(ap)->elsize; - size_t i; + int i; - for(i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - return (c1[i] > c2[i]) ? 1 : -1; - } + i = memcmp(c1, c2, len); + if (i > 0) { + return 1; + } + else if (i < 0) { + return -1; } return 0; } @@ -2758,26 +2771,52 @@ finish: #define _LESS_THAN_OR_EQUAL(a,b) ((a) <= (b)) +static int +BOOL_argmax(npy_bool *ip, npy_intp n, npy_intp *max_ind, + PyArrayObject *NPY_UNUSED(aip)) + +{ + npy_intp i = 0; + /* memcmp like logical_and on i386 is maybe slower for small arrays */ +#ifdef NPY_HAVE_SSE2_INTRINSICS + const __m128i zero = _mm_setzero_si128(); + for (; i < n - (n % 32); i+=32) { + __m128i d1 = _mm_loadu_si128((__m128i*)&ip[i]); + __m128i d2 = _mm_loadu_si128((__m128i*)&ip[i + 16]); + d1 = _mm_cmpeq_epi8(d1, zero); + d2 = _mm_cmpeq_epi8(d2, zero); + if (_mm_movemask_epi8(_mm_min_epu8(d1, d2)) != 0xFFFF) { + break; + } + } +#endif + for (; i < n; i++) { + if (ip[i]) { + *max_ind = i; + return 0; + } + } + *max_ind = 0; + return 0; +} + /**begin repeat * - * #fname = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG, * HALF, FLOAT, DOUBLE, LONGDOUBLE, * CFLOAT, CDOUBLE, CLONGDOUBLE, * DATETIME, TIMEDELTA# - * #type = npy_bool, - * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, * npy_long, npy_ulong, npy_longlong, npy_ulonglong, * npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble, * npy_datetime, npy_timedelta# - * #isbool = 1, 0*19# - * #isfloat = 0*11, 1*7, 0*2# - * #isnan = nop*11, npy_half_isnan, npy_isnan*6, nop*2# - * #le = _LESS_THAN_OR_EQUAL*11, npy_half_le, _LESS_THAN_OR_EQUAL*8# - * #iscomplex = 0*15, 1*3, 0*2# - * #incr = ip++*15, ip+=2*3, ip++*2# + * #isfloat = 0*10, 1*7, 0*2# + * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2# + * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8# + * #iscomplex = 0*14, 1*3, 0*2# + * #incr = ip++*14, ip+=2*3, ip++*2# */ static int @fname@_argmax(@type@ *ip, npy_intp n, npy_intp *max_ind, @@ -2803,12 +2842,6 @@ static int return 0; } #endif -#if @isbool@ - if (mp) { - /* True encountered; it's maximal */ - return 0; - } -#endif for (i = 1; i < n; i++) { @incr@; @@ -2837,12 +2870,6 @@ static int break; } #endif -#if @isbool@ - if (mp) { - /* True encountered; it's maximal */ - break; - } -#endif } #endif } @@ -2851,26 +2878,37 @@ static int /**end repeat**/ +static int +BOOL_argmin(npy_bool *ip, npy_intp n, npy_intp *min_ind, + PyArrayObject *NPY_UNUSED(aip)) + +{ + npy_bool * p = memchr(ip, 0, n * sizeof(*ip)); + if (p == NULL) { + *min_ind = 0; + return 0; + } + *min_ind = p - ip; + return 0; +} + /**begin repeat * - * #fname = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG, * HALF, FLOAT, DOUBLE, 
LONGDOUBLE, * CFLOAT, CDOUBLE, CLONGDOUBLE, * DATETIME, TIMEDELTA# - * #type = npy_bool, - * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, * npy_long, npy_ulong, npy_longlong, npy_ulonglong, * npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble, * npy_datetime, npy_timedelta# - * #isbool = 1, 0*19# - * #isfloat = 0*11, 1*7, 0*2# - * #isnan = nop*11, npy_half_isnan, npy_isnan*6, nop*2# - * #le = _LESS_THAN_OR_EQUAL*11, npy_half_le, _LESS_THAN_OR_EQUAL*8# - * #iscomplex = 0*15, 1*3, 0*2# - * #incr = ip++*15, ip+=2*3, ip++*2# + * #isfloat = 0*10, 1*7, 0*2# + * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2# + * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8# + * #iscomplex = 0*14, 1*3, 0*2# + * #incr = ip++*14, ip+=2*3, ip++*2# */ static int @fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind, @@ -2896,12 +2934,6 @@ static int return 0; } #endif -#if @isbool@ - if (!mp) { - /* False encountered; it's minimal */ - return 0; - } -#endif for (i = 1; i < n; i++) { @incr@; @@ -2930,12 +2962,6 @@ static int break; } #endif -#if @isbool@ - if (!mp) { - /* False encountered; it's minimal */ - break; - } -#endif } #endif } @@ -2991,7 +3017,7 @@ static int memcpy(mp, ip, elsize); *max_ind = 0; for (i = 1; i < n; i++) { - ip += elsize; + ip += elsize / sizeof(@type@); if (@fname@_compare(ip, mp, aip) > 0) { memcpy(mp, ip, elsize); *max_ind = i; @@ -3048,7 +3074,7 @@ static int memcpy(mp, ip, elsize); *min_ind = 0; for(i=1; i<n; i++) { - ip += elsize; + ip += elsize / sizeof(@type@); if (@fname@_compare(mp,ip,aip) > 0) { memcpy(mp, ip, elsize); *min_ind=i; @@ -3074,6 +3100,115 @@ static int * dot means inner product */ +/************************** MAYBE USE CBLAS *********************************/ + + +/**begin repeat + * + * #name = FLOAT, DOUBLE# + * #type = npy_float, npy_double# + * #prefix = s, d# + */ +NPY_NO_EXPORT void +@name@_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, + npy_intp n, void *NPY_UNUSED(ignore)) +{ +#if defined(HAVE_CBLAS) + int is1b = blas_stride(is1, sizeof(@type@)); + int is2b = blas_stride(is2, sizeof(@type@)); + + if (is1b && is2b) + { + double sum = 0.; /* double for stability */ + + while (n > 0) { + int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK; + + sum += cblas_@prefix@dot(chunk, + (@type@ *) ip1, is1b, + (@type@ *) ip2, is2b); + /* use char strides here */ + ip1 += chunk * is1; + ip2 += chunk * is2; + n -= chunk; + } + *((@type@ *)op) = (@type@)sum; + } + else +#endif + { + @type@ sum = (@type@)0; /* could make this double */ + npy_intp i; + + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { + const @type@ ip1r = *((@type@ *)ip1); + const @type@ ip2r = *((@type@ *)ip2); + + sum += ip1r * ip2r; + } + *((@type@ *)op) = sum; + } +} +/**end repeat**/ + +/**begin repeat + * + * #name = CFLOAT, CDOUBLE# + * #ctype = npy_cfloat, npy_cdouble# + * #type = npy_float, npy_double# + * #prefix = c, z# + */ +NPY_NO_EXPORT void +@name@_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, + char *op, npy_intp n, void *NPY_UNUSED(ignore)) +{ +#if defined(HAVE_CBLAS) + int is1b = blas_stride(is1, sizeof(@ctype@)); + int is2b = blas_stride(is2, sizeof(@ctype@)); + + if (is1b && is2b) { + double sum[2] = {0., 0.}; /* double for stability */ + + while (n > 0) { + int chunk = n < NPY_CBLAS_CHUNK ? 
n : NPY_CBLAS_CHUNK; + @type@ tmp[2]; + + cblas_@prefix@dotu_sub(chunk, ip1, is1b, ip2, is2b, tmp); + sum[0] += (double)tmp[0]; + sum[1] += (double)tmp[1]; + /* use char strides here */ + ip1 += chunk * is1; + ip2 += chunk * is2; + n -= chunk; + } + ((@type@ *)op)[0] = (@type@)sum[0]; + ((@type@ *)op)[1] = (@type@)sum[1]; + } + else +#endif + { + @type@ sumr = (@type@)0.0; + @type@ sumi = (@type@)0.0; + npy_intp i; + + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { + const @type@ ip1r = ((@type@ *)ip1)[0]; + const @type@ ip1i = ((@type@ *)ip1)[1]; + const @type@ ip2r = ((@type@ *)ip2)[0]; + const @type@ ip2i = ((@type@ *)ip2)[1]; + + sumr += ip1r * ip2r - ip1i * ip2i; + sumi += ip1r * ip2i + ip1i * ip2r; + } + ((@type@ *)op)[0] = sumr; + ((@type@ *)op)[1] = sumi; + } +} + +/**end repeat**/ + +/**************************** NO CBLAS VERSIONS *****************************/ + static void BOOL_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, void *NPY_UNUSED(ignore))
@@ -3094,16 +3229,13 @@ BOOL_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, * * #name = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG, - * FLOAT, DOUBLE, LONGDOUBLE, - * DATETIME, TIMEDELTA# + * LONGDOUBLE, DATETIME, TIMEDELTA# * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_float, npy_double, npy_longdouble, - * npy_datetime, npy_timedelta# + * npy_longdouble, npy_datetime, npy_timedelta# * #out = npy_long, npy_ulong, npy_long, npy_ulong, npy_long, npy_ulong, * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_float, npy_double, npy_longdouble, - * npy_datetime, npy_timedelta# + * npy_longdouble, npy_datetime, npy_timedelta# */ static void @name@_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n,
@@ -3121,8 +3253,8 @@ static void /**end repeat**/ static void -HALF_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, - void *NPY_UNUSED(ignore)) +HALF_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, + npy_intp n, void *NPY_UNUSED(ignore)) { float tmp = 0.0f; npy_intp i;
@@ -3134,28 +3266,27 @@ HALF_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, *((npy_half *)op) = npy_float_to_half(tmp); } -/**begin repeat - * - * #name = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = npy_float, npy_double, npy_longdouble# - */ -static void @name@_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, - char *op, npy_intp n, void *NPY_UNUSED(ignore)) +static void +CLONGDOUBLE_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, + char *op, npy_intp n, void *NPY_UNUSED(ignore)) { - @type@ tmpr = (@type@)0.0, tmpi=(@type@)0.0; + npy_longdouble tmpr = 0.0L; + npy_longdouble tmpi = 0.0L; npy_intp i; for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { - tmpr += ((@type@ *)ip1)[0] * ((@type@ *)ip2)[0] - - ((@type@ *)ip1)[1] * ((@type@ *)ip2)[1]; - tmpi += ((@type@ *)ip1)[1] * ((@type@ *)ip2)[0] - + ((@type@ *)ip1)[0] * ((@type@ *)ip2)[1]; + const npy_longdouble ip1r = ((npy_longdouble *)ip1)[0]; + const npy_longdouble ip1i = ((npy_longdouble *)ip1)[1]; + const npy_longdouble ip2r = ((npy_longdouble *)ip2)[0]; + const npy_longdouble ip2i = ((npy_longdouble *)ip2)[1]; + + tmpr += ip1r * ip2r - ip1i * ip2i; + tmpi += ip1r * ip2i + ip1i * ip2r; } - ((@type@ *)op)[0] = tmpr; ((@type@ *)op)[1] = tmpi; + ((npy_longdouble *)op)[0] = tmpr; + ((npy_longdouble *)op)[1] = tmpi; } -/**end repeat**/ - static
void OBJECT_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, void *NPY_UNUSED(ignore))
@@ -3670,6 +3801,90 @@ static int #define OBJECT_fasttake NULL +/* + ***************************************************************************** + ** small correlate ** + ***************************************************************************** + */ + +/* + * Compute correlation of data with small kernels + * Calling a BLAS dot product for the inner loop of the correlation is overkill + * for small kernels. It is faster to compute it directly. + * Intended to be used by _pyarray_correlate, so no input verification is done; + * in particular it does not handle the boundaries, which must be handled by + * the caller. + * Returns 0 if the kernel is considered too large or the types are not + * supported; the regular array dot should then be used to process the data. + * + * d_, dstride, nd, dtype: data pointer, its stride in bytes, number of + * elements and type of data + * k_, kstride, nk, ktype: kernel pointer, its stride in bytes, number of + * elements and type of data + * out_, ostride: output data pointer and its stride in bytes + */ +NPY_NO_EXPORT int +small_correlate(const char * d_, npy_intp dstride, + npy_intp nd, enum NPY_TYPES dtype, + const char * k_, npy_intp kstride, + npy_intp nk, enum NPY_TYPES ktype, + char * out_, npy_intp ostride) +{ + /* only handle small kernels and uniform types */ + if (nk > 11 || dtype != ktype) { + return 0; + } + + switch (dtype) { +/**begin repeat + * Float types + * #type = npy_float, npy_double# + * #TYPE = NPY_FLOAT, NPY_DOUBLE# + */ + case @TYPE@: + { + npy_intp i; + const @type@ * d = (@type@*)d_; + const @type@ * k = (@type@*)k_; + @type@ * out = (@type@*)out_; + dstride /= sizeof(@type@); + kstride /= sizeof(@type@); + ostride /= sizeof(@type@); + /* unroll inner loop to optimize register usage of the kernel */ + switch (nk) { +/**begin repeat1 + * #ksz_outer = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11# */ + case @ksz_outer@: + { +/**begin repeat2 + * #ksz = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11# */ +#if @ksz@ <= @ksz_outer@ + /* load kernel */ + const @type@ k@ksz@ = k[(@ksz@ - 1) * kstride]; +#endif +/**end repeat2**/ + for (i = 0; i < nd; i++) { + @type@ s = 0; +/**begin repeat2 + * #ksz = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11# */ +#if @ksz@ <= @ksz_outer@ + s += d[(i + @ksz@ - 1) * dstride] * k@ksz@; +#endif +/**end repeat2**/ + out[i * ostride] = s; + } + return 1; + } +/**end repeat1**/ + default: + return 0; + } + } +/**end repeat**/ + default: + return 0; + } +} /* *****************************************************************************
@@ -3696,6 +3911,7 @@ static int * #align = char, char, npy_ucs4# * #NAME = Void, String, Unicode# * #endian = |, |, =# + * #flags = 0, 0, NPY_NEEDS_INIT# */ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { {
@@ -3775,8 +3991,8 @@ static PyArray_Descr @from@_Descr = { NPY_@from@LTR, /* byteorder */ '@endian@', - /* flags */ - 0, + /* flags, unicode needs init as py3.3 does not like printing garbage */ + @flags@, /* type_num */ NPY_@from@, /* elsize */
@@ -3922,7 +4138,8 @@ NPY_NO_EXPORT PyArray_Descr @from@_Descr = { /* elsize */ @num@ * sizeof(@fromtype@), /* alignment */ - @num@ * _ALIGN(@fromtype@), + @num@ * _ALIGN(@fromtype@) > NPY_MAX_COPY_ALIGNMENT ?
+ NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@fromtype@), /* subarray */ NULL, /* fields */ @@ -4268,7 +4485,8 @@ set_typeinfo(PyObject *dict) #endif NPY_@name@, NPY_BITSOF_@name@, - @num@ * _ALIGN(@type@), + @num@ * _ALIGN(@type@) > NPY_MAX_COPY_ALIGNMENT ? + NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@type@), (PyObject *) &Py@Name@ArrType_Type)); Py_DECREF(s); diff --git a/numpy/core/src/multiarray/arraytypes.h b/numpy/core/src/multiarray/arraytypes.h index ff7d4ae40..15520ce74 100644 --- a/numpy/core/src/multiarray/arraytypes.h +++ b/numpy/core/src/multiarray/arraytypes.h @@ -1,6 +1,8 @@ #ifndef _NPY_ARRAYTYPES_H_ #define _NPY_ARRAYTYPES_H_ +#include "common.h" + #ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyArray_Descr LONGLONG_Descr; extern NPY_NO_EXPORT PyArray_Descr LONG_Descr; @@ -10,4 +12,26 @@ extern NPY_NO_EXPORT PyArray_Descr INT_Descr; NPY_NO_EXPORT int set_typeinfo(PyObject *dict); +/* needed for blasfuncs */ +NPY_NO_EXPORT void +FLOAT_dot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +NPY_NO_EXPORT void +CFLOAT_dot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +NPY_NO_EXPORT void +DOUBLE_dot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +NPY_NO_EXPORT void +CDOUBLE_dot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + + +/* for _pyarray_correlate */ +NPY_NO_EXPORT int +small_correlate(const char * d_, npy_intp dstride, + npy_intp nd, enum NPY_TYPES dtype, + const char * k_, npy_intp kstride, + npy_intp nk, enum NPY_TYPES ktype, + char * out_, npy_intp ostride); + #endif diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index ea1a885ed..7f7607e1f 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -270,6 +270,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, tmp = name; #endif if (tmp == NULL || PyBytes_AsStringAndSize(tmp, &p, &len) < 0) { + PyErr_Clear(); PyErr_SetString(PyExc_ValueError, "invalid field name"); return -1; } @@ -628,6 +629,8 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags) { PyArrayObject *self; _buffer_info_t *info = NULL; + int i; + Py_ssize_t sd; self = (PyArrayObject*)obj; @@ -702,6 +705,31 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags) } if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) { view->strides = info->strides; + +#ifdef NPY_RELAXED_STRIDES_CHECKING + /* + * If NPY_RELAXED_STRIDES_CHECKING is on, the array may be + * contiguous, but it won't look that way to Python when it + * tries to determine contiguity by looking at the strides + * (since one of the elements may be -1). In that case, just + * regenerate strides from shape. 
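The buffer-export comment above ends by regenerating strides from the shape. Outside of the Py_buffer plumbing, that recomputation is just a running product over the extents. A minimal standalone sketch in plain C (ptrdiff_t stands in for npy_intp; the function names are illustrative, not NumPy API):

    #include <stddef.h>

    /* C-contiguous (row-major) strides: the last axis moves by one item,
     * each earlier axis by the product of all later extents. */
    static void c_contiguous_strides(int ndim, const ptrdiff_t *shape,
                                     ptrdiff_t itemsize, ptrdiff_t *strides)
    {
        ptrdiff_t sd = itemsize;
        int i;
        for (i = ndim - 1; i >= 0; --i) {
            strides[i] = sd;
            sd *= shape[i];
        }
    }

    /* F-contiguous (column-major) is the mirror image: the first axis
     * moves fastest, matching the second branch in array_getbuffer. */
    static void f_contiguous_strides(int ndim, const ptrdiff_t *shape,
                                     ptrdiff_t itemsize, ptrdiff_t *strides)
    {
        ptrdiff_t sd = itemsize;
        int i;
        for (i = 0; i < ndim; ++i) {
            strides[i] = sd;
            sd *= shape[i];
        }
    }

For an 8-byte dtype with shape (2, 3) this yields C-order strides (24, 8) and F-order strides (8, 16), regardless of what the relaxed-strides bookkeeping stored.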
+ */ + if (PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS) && + !((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)) { + sd = view->itemsize; + for (i = view->ndim-1; i >= 0; --i) { + view->strides[i] = sd; + sd *= view->shape[i]; + } + } + else if (PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)) { + sd = view->itemsize; + for (i = 0; i < view->ndim; ++i) { + view->strides[i] = sd; + sd *= view->shape[i]; + } + } +#endif } else { view->strides = NULL; @@ -921,7 +949,7 @@ _descriptor_from_pep3118_format_fast(char *s, PyObject **result) *result = (PyObject*)PyArray_DescrNewByteorder(descr, byte_order); Py_DECREF(descr); } - + return 1; } diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 50938be4c..5563a2515 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -1182,7 +1182,7 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o NPY_NO_EXPORT PyObject * PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) { - if (PyArray_ISCOMPLEX(self)) { + if (PyArray_ISCOMPLEX(self) || PyArray_ISOBJECT(self)) { if (out == NULL) { return PyArray_GenericUnaryFunction(self, n_ops.conjugate); diff --git a/numpy/core/blasdot/_dotblas.c b/numpy/core/src/multiarray/cblasfuncs.c index 48aa39ff8..f8f508ed0 100644 --- a/numpy/core/blasdot/_dotblas.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -4,141 +4,15 @@ */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" - -#include "numpy/arrayobject.h" -#include "npy_config.h" -#include "npy_pycompat.h" -#include "ufunc_override.h" -#ifndef CBLAS_HEADER -#define CBLAS_HEADER "cblas.h" -#endif -#include CBLAS_HEADER +#define _MULTIARRAYMODULE +#include <Python.h> #include <assert.h> -#include <limits.h> -#include <stdio.h> - -static char module_doc[] = -"This module provides a BLAS optimized\nmatrix multiply, inner product and dot for numpy arrays"; - -static PyArray_DotFunc *oldFunctions[NPY_NTYPES]; - -#define MIN(a, b) ((a) < (b) ? (a) : (b)) - -/* - * Convert NumPy stride to BLAS stride. Returns 0 if conversion cannot be done - * (BLAS won't handle negative or zero strides the way we want). - */ -static NPY_INLINE int -blas_stride(npy_intp stride, unsigned itemsize) -{ - if (stride <= 0 || stride % itemsize != 0) { - return 0; - } - stride /= itemsize; - - if (stride > INT_MAX) { - return 0; - } - return stride; -} - -/* - * The following functions do a "chunked" dot product using BLAS when - * sizeof(npy_intp) > sizeof(int), because BLAS libraries can typically not - * handle more than INT_MAX elements per call. - * - * The chunksize is the greatest power of two less than INT_MAX. 
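The chunk-size comment above is the whole trick behind the `while (n > 0)` loops in the new `@name@_dot` functions: CBLAS takes `int` element counts, so a pointer-sized `n` has to be fed to it in INT_MAX-safe pieces. A self-contained sketch of the pattern, with a trivial `dot_int` standing in for a `cblas_ddot`-style routine:

    #include <limits.h>
    #include <stddef.h>

    /* Stand-in for cblas_ddot: like CBLAS it only accepts an int count. */
    static double dot_int(int n, const double *x, const double *y)
    {
        double s = 0.0;
        int i;
        for (i = 0; i < n; i++) {
            s += x[i] * y[i];
        }
        return s;
    }

    /* 2**30, the greatest power of two below INT_MAX. */
    #define CHUNK (INT_MAX / 2 + 1)

    /* n may exceed INT_MAX; hand the int-based routine at most CHUNK
     * elements per call and accumulate across calls. */
    static double chunked_dot(ptrdiff_t n, const double *x, const double *y)
    {
        double sum = 0.0;
        while (n > 0) {
            int chunk = (int)(n < CHUNK ? n : CHUNK);
            sum += dot_int(chunk, x, y);
            x += chunk;
            y += chunk;
            n -= chunk;
        }
        return sum;
    }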
- */ -#if NPY_MAX_INTP > INT_MAX -# define CHUNKSIZE (INT_MAX / 2 + 1) -#else -# define CHUNKSIZE NPY_MAX_INTP -#endif - -static void -FLOAT_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - int na = blas_stride(stridea, sizeof(float)); - int nb = blas_stride(strideb, sizeof(float)); - - if (na && nb) { - double r = 0.; /* double for stability */ - float *fa = a, *fb = b; +#include <numpy/arrayobject.h> +#include "npy_cblas.h" +#include "arraytypes.h" +#include "common.h" - while (n > 0) { - int chunk = MIN(n, CHUNKSIZE); - - r += cblas_sdot(chunk, fa, na, fb, nb); - fa += chunk * na; - fb += chunk * nb; - n -= chunk; - } - *((float *)res) = r; - } - else { - oldFunctions[NPY_FLOAT](a, stridea, b, strideb, res, n, tmp); - } -} - -static void -DOUBLE_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - int na = blas_stride(stridea, sizeof(double)); - int nb = blas_stride(strideb, sizeof(double)); - - if (na && nb) { - double r = 0.; - double *da = a, *db = b; - - while (n > 0) { - int chunk = MIN(n, CHUNKSIZE); - - r += cblas_ddot(chunk, da, na, db, nb); - da += chunk * na; - db += chunk * nb; - n -= chunk; - } - *((double *)res) = r; - } - else { - oldFunctions[NPY_DOUBLE](a, stridea, b, strideb, res, n, tmp); - } -} - -static void -CFLOAT_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - int na = blas_stride(stridea, sizeof(npy_cfloat)); - int nb = blas_stride(strideb, sizeof(npy_cfloat)); - - if (na && nb) { - cblas_cdotu_sub((int)n, (float *)a, na, (float *)b, nb, (float *)res); - } - else { - oldFunctions[NPY_CFLOAT](a, stridea, b, strideb, res, n, tmp); - } -} - -static void -CDOUBLE_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - int na = blas_stride(stridea, sizeof(npy_cdouble)); - int nb = blas_stride(strideb, sizeof(npy_cdouble)); - - if (na && nb) { - cblas_zdotu_sub((int)n, (double *)a, na, (double *)b, nb, - (double *)res); - } - else { - oldFunctions[NPY_CDOUBLE](a, stridea, b, strideb, res, n, tmp); - } -} /* * Helper: call appropriate BLAS dot function for typenum. @@ -148,29 +22,27 @@ static void blas_dot(int typenum, npy_intp n, void *a, npy_intp stridea, void *b, npy_intp strideb, void *res) { - PyArray_DotFunc *dot = NULL; switch (typenum) { case NPY_DOUBLE: - dot = DOUBLE_dot; + DOUBLE_dot(a, stridea, b, strideb, res, n, NULL); break; case NPY_FLOAT: - dot = FLOAT_dot; + FLOAT_dot(a, stridea, b, strideb, res, n, NULL); break; case NPY_CDOUBLE: - dot = CDOUBLE_dot; + CDOUBLE_dot(a, stridea, b, strideb, res, n, NULL); break; case NPY_CFLOAT: - dot = CFLOAT_dot; + CFLOAT_dot(a, stridea, b, strideb, res, n, NULL); break; } - assert(dot != NULL); - dot(a, stridea, b, strideb, res, n, NULL); } static const double oneD[2] = {1.0, 0.0}, zeroD[2] = {0.0, 0.0}; static const float oneF[2] = {1.0, 0.0}, zeroF[2] = {0.0, 0.0}; + /* * Helper: dispatch to appropriate cblas_?gemm for typenum. */ @@ -182,7 +54,6 @@ gemm(int typenum, enum CBLAS_ORDER order, { const void *Adata = PyArray_DATA(A), *Bdata = PyArray_DATA(B); void *Rdata = PyArray_DATA(R); - int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1; switch (typenum) { @@ -240,124 +111,52 @@ gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, } -static npy_bool altered=NPY_FALSE; - -/* - * alterdot() changes all dot functions to use blas. 
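Both the removed `_dotblas` helpers and their replacements in arraytypes.c.src accumulate single-precision dot products in a `double` ("double for stability"). A small demonstration of why that matters; the iteration count is arbitrary, anything above 2**24 shows the effect:

    #include <stdio.h>

    int main(void)
    {
        /* A float accumulator stalls once the running sum reaches 2**24,
         * because adding 1.0f no longer changes it; a double keeps going. */
        float  facc = 0.0f;
        double dacc = 0.0;
        long   i;

        for (i = 0; i < 30000000L; i++) {
            facc += 1.0f;
            dacc += 1.0f;
        }
        printf("float accumulator:  %.1f\n", facc);  /* 16777216.0 */
        printf("double accumulator: %.1f\n", dacc);  /* 30000000.0 */
        return 0;
    }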
- */ -static PyObject * -dotblas_alterdot(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyArray_Descr *descr; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - /* Replace the dot functions to the ones using blas */ - - if (!altered) { - descr = PyArray_DescrFromType(NPY_FLOAT); - oldFunctions[NPY_FLOAT] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)FLOAT_dot; - - descr = PyArray_DescrFromType(NPY_DOUBLE); - oldFunctions[NPY_DOUBLE] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)DOUBLE_dot; - - descr = PyArray_DescrFromType(NPY_CFLOAT); - oldFunctions[NPY_CFLOAT] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)CFLOAT_dot; - - descr = PyArray_DescrFromType(NPY_CDOUBLE); - oldFunctions[NPY_CDOUBLE] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)CDOUBLE_dot; - - altered = NPY_TRUE; - } - - Py_INCREF(Py_None); - return Py_None; -} - -/* - * restoredot() restores dots to defaults. - */ -static PyObject * -dotblas_restoredot(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyArray_Descr *descr; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - if (altered) { - descr = PyArray_DescrFromType(NPY_FLOAT); - descr->f->dotfunc = oldFunctions[NPY_FLOAT]; - oldFunctions[NPY_FLOAT] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(NPY_DOUBLE); - descr->f->dotfunc = oldFunctions[NPY_DOUBLE]; - oldFunctions[NPY_DOUBLE] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(NPY_CFLOAT); - descr->f->dotfunc = oldFunctions[NPY_CFLOAT]; - oldFunctions[NPY_CFLOAT] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(NPY_CDOUBLE); - descr->f->dotfunc = oldFunctions[NPY_CDOUBLE]; - oldFunctions[NPY_CDOUBLE] = NULL; - Py_XDECREF(descr); - - altered = NPY_FALSE; - } - - Py_INCREF(Py_None); - return Py_None; -} - typedef enum {_scalar, _column, _row, _matrix} MatrixShape; + static MatrixShape _select_matrix_shape(PyArrayObject *array) { switch (PyArray_NDIM(array)) { - case 0: - return _scalar; - case 1: - if (PyArray_DIM(array, 0) > 1) - return _column; - return _scalar; - case 2: - if (PyArray_DIM(array, 0) > 1) { - if (PyArray_DIM(array, 1) == 1) + case 0: + return _scalar; + case 1: + if (PyArray_DIM(array, 0) > 1) return _column; - else - return _matrix; - } - if (PyArray_DIM(array, 1) == 1) return _scalar; - return _row; + case 2: + if (PyArray_DIM(array, 0) > 1) { + if (PyArray_DIM(array, 1) == 1) + return _column; + else + return _matrix; + } + if (PyArray_DIM(array, 1) == 1) + return _scalar; + return _row; } return _matrix; } -/* This also makes sure that the data segment is aligned with - an itemsize address as well by returning one if not true. -*/ +/* + * This also makes sure that the data segment is aligned with + * an itemsize address as well by returning one if not true. 
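The rule `_bad_strides` encodes, per the comment above, is that BLAS only gets the array when the base pointer and every stride are non-negative whole multiples of the itemsize. The same check in standalone C (an illustrative rewrite, not the NumPy function itself):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns 1 ("bad") when the data pointer is not itemsize-aligned or
     * any stride is negative or not a whole number of items. */
    static int bad_strides(const void *data, int ndim,
                           const ptrdiff_t *strides, ptrdiff_t itemsize)
    {
        int i;
        if ((uintptr_t)data % (uintptr_t)itemsize != 0) {
            return 1;
        }
        for (i = 0; i < ndim; i++) {
            if (strides[i] < 0 || strides[i] % itemsize != 0) {
                return 1;
            }
        }
        return 0;
    }

When this returns 1 the caller copies the operand into a well-behaved layout first, which is what `cblas_matrixproduct` does with `PyArray_NewCopy` below.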
+ */ static int _bad_strides(PyArrayObject *ap) { - register int itemsize = PyArray_ITEMSIZE(ap); - register int i, N=PyArray_NDIM(ap); - register npy_intp *strides = PyArray_STRIDES(ap); + int itemsize = PyArray_ITEMSIZE(ap); + int i, N=PyArray_NDIM(ap); + npy_intp *strides = PyArray_STRIDES(ap); - if (((npy_intp)(PyArray_DATA(ap)) % itemsize) != 0) + if (((npy_intp)(PyArray_DATA(ap)) % itemsize) != 0) { return 1; - for (i=0; i<N; i++) { - if ((strides[i] < 0) || (strides[i] % itemsize) != 0) + } + for (i = 0; i < N; i++) { + if ((strides[i] < 0) || (strides[i] % itemsize) != 0) { return 1; + } } return 0; @@ -369,110 +168,30 @@ _bad_strides(PyArrayObject *ap) * Like the generic numpy equivalent the product sum is over * the last dimension of a and the second-to-last dimension of b. * NB: The first argument is not conjugated.; + * + * This is for use by PyArray_MatrixProduct2. It is assumed on entry that + * the arrays ap1 and ap2 have a common data type given by typenum that is + * float, double, cfloat, or cdouble and have dimension <= 2. The + * __numpy_ufunc__ nonsense is also assumed to have been taken care of. */ -static PyObject * -dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwargs) +NPY_NO_EXPORT PyObject * +cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, + PyArrayObject *out) { - static PyObject *cached_npy_dot = NULL; - PyObject *override = NULL; - PyObject *module; - PyObject *op1, *op2; - PyArrayObject *ap1 = NULL, *ap2 = NULL, *out = NULL, *ret = NULL; - int errval; + PyArrayObject *ret = NULL; int j, lda, ldb; npy_intp l; - int typenum, nd; + int nd; npy_intp ap1stride = 0; npy_intp dimensions[NPY_MAXDIMS]; npy_intp numbytes; double prior1, prior2; PyTypeObject *subtype; - PyArray_Descr *dtype; MatrixShape ap1shape, ap2shape; - char* kwords[] = {"a", "b", "out", NULL }; - - if (cached_npy_dot == NULL) { - module = PyImport_ImportModule("numpy.core._dotblas"); - cached_npy_dot = PyDict_GetItemString(PyModule_GetDict(module), "dot"); - - Py_INCREF(cached_npy_dot); - Py_DECREF(module); - } - - errval = PyUFunc_CheckOverride(cached_npy_dot, "__call__", args, kwargs, - &override, 2); - if (errval) { - return NULL; - } - else if (override) { - return override; - } - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwords, - &op1, &op2, &out)) { - return NULL; - } - if ((PyObject *)out == Py_None) { - out = NULL; - } - - /* - * "Matrix product" using the BLAS. - * Only works for float double and complex types. 
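`_select_matrix_shape` above folds every operand of ndim <= 2 into four cases, treating length-1 axes as degenerate. The same decision table as a standalone function (the enum and names are illustrative):

    #include <stddef.h>

    typedef enum { SHAPE_SCALAR, SHAPE_COLUMN, SHAPE_ROW, SHAPE_MATRIX } shape_t;

    /* Classify by ndim and extents, following the same rules as
     * _select_matrix_shape: length-1 axes degrade a matrix to a vector
     * and a vector to a scalar. */
    static shape_t select_shape(int ndim, const ptrdiff_t *dims)
    {
        switch (ndim) {
            case 0:
                return SHAPE_SCALAR;
            case 1:
                return dims[0] > 1 ? SHAPE_COLUMN : SHAPE_SCALAR;
            case 2:
                if (dims[0] > 1) {
                    return dims[1] == 1 ? SHAPE_COLUMN : SHAPE_MATRIX;
                }
                return dims[1] == 1 ? SHAPE_SCALAR : SHAPE_ROW;
            default:
                return SHAPE_MATRIX;
        }
    }

So a (5, 1) array is a column, (1, 5) a row, and (1, 1) a scalar; the classification is what steers the axpy, dot, gemv and gemm paths that follow.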
- */ - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - /* This function doesn't handle other types */ - if ((typenum != NPY_DOUBLE && typenum != NPY_CDOUBLE && - typenum != NPY_FLOAT && typenum != NPY_CFLOAT)) { - return PyArray_Return((PyArrayObject *)PyArray_MatrixProduct2( - (PyObject *)op1, - (PyObject *)op2, - out)); - } - - dtype = PyArray_DescrFromType(typenum); - if (dtype == NULL) { - return NULL; - } - Py_INCREF(dtype); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, dtype, 0, 0, NPY_ARRAY_ALIGNED, NULL); - if (ap1 == NULL) { - Py_DECREF(dtype); - return NULL; - } - ap2 = (PyArrayObject *)PyArray_FromAny(op2, dtype, 0, 0, NPY_ARRAY_ALIGNED, NULL); - if (ap2 == NULL) { - Py_DECREF(ap1); - return NULL; - } - - if ((PyArray_NDIM(ap1) > 2) || (PyArray_NDIM(ap2) > 2)) { - /* - * This function doesn't handle dimensions greater than 2 - * (or negative striding) -- other - * than to ensure the dot function is altered - */ - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - ret = (PyArrayObject *)PyArray_MatrixProduct2((PyObject *)ap1, - (PyObject *)ap2, - out); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } if (_bad_strides(ap1)) { - op1 = PyArray_NewCopy(ap1, NPY_ANYORDER); + PyObject *op1 = PyArray_NewCopy(ap1, NPY_ANYORDER); + Py_DECREF(ap1); ap1 = (PyArrayObject *)op1; if (ap1 == NULL) { @@ -480,7 +199,8 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa } } if (_bad_strides(ap2)) { - op2 = PyArray_NewCopy(ap2, NPY_ANYORDER); + PyObject *op2 = PyArray_NewCopy(ap2, NPY_ANYORDER); + Py_DECREF(ap2); ap2 = (PyArrayObject *)op2; if (ap2 == NULL) { @@ -494,7 +214,8 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa PyArrayObject *oap1, *oap2; oap1 = ap1; oap2 = ap2; /* One of ap1 or ap2 is a scalar */ - if (ap1shape == _scalar) { /* Make ap2 the scalar */ + if (ap1shape == _scalar) { + /* Make ap2 the scalar */ PyArrayObject *t = ap1; ap1 = ap2; ap2 = t; @@ -529,7 +250,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa l = PyArray_DIM(oap1, PyArray_NDIM(oap1) - 1); if (PyArray_DIM(oap2, 0) != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); + dot_alignment_error(oap1, PyArray_NDIM(oap1) - 1, oap2, 0); goto fail; } nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2; @@ -541,7 +262,8 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa * Either PyArray_NDIM(ap1) is 1 dim or PyArray_NDIM(ap2) is 1 dim * and the other is 2-dim */ - dimensions[0] = (PyArray_NDIM(oap1) == 2) ? PyArray_DIM(oap1, 0) : PyArray_DIM(oap2, 1); + dimensions[0] = (PyArray_NDIM(oap1) == 2) ? + PyArray_DIM(oap1, 0) : PyArray_DIM(oap2, 1); l = dimensions[0]; /* * Fix it so that dot(shape=(N,1), shape=(1,)) @@ -579,13 +301,15 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa l = PyArray_DIM(ap1, PyArray_NDIM(ap1) - 1); if (PyArray_DIM(ap2, 0) != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); + dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1, ap2, 0); goto fail; } nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2; - if (nd == 1) - dimensions[0] = (PyArray_NDIM(ap1) == 2) ? PyArray_DIM(ap1, 0) : PyArray_DIM(ap2, 1); + if (nd == 1) { + dimensions[0] = (PyArray_NDIM(ap1) == 2) ? 
+ PyArray_DIM(ap1, 0) : PyArray_DIM(ap2, 1); + } else if (nd == 2) { dimensions[0] = PyArray_DIM(ap1, 0); dimensions[1] = PyArray_DIM(ap2, 1); @@ -603,8 +327,9 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa subtype = Py_TYPE(ap1); } - if (out) { + if (out != NULL) { int d; + /* verify that out is usable */ if (Py_TYPE(out) != subtype || PyArray_NDIM(out) != nd || @@ -625,11 +350,12 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa } Py_INCREF(out); ret = out; - } else { + } + else { + PyObject *tmp = (PyObject *)(prior2 > prior1 ? ap2 : ap1); + ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? ap2 : ap1)); + typenum, NULL, NULL, 0, 0, tmp); } if (ret == NULL) { @@ -637,7 +363,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa } numbytes = PyArray_NBYTES(ret); memset(PyArray_DATA(ret), 0, numbytes); - if (numbytes==0 || l == 0) { + if (numbytes == 0 || l == 0) { Py_DECREF(ap1); Py_DECREF(ap2); return PyArray_Return(ret); @@ -654,11 +380,14 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa if (typenum == NPY_DOUBLE) { if (l == 1) { *((double *)PyArray_DATA(ret)) = *((double *)PyArray_DATA(ap2)) * - *((double *)PyArray_DATA(ap1)); + *((double *)PyArray_DATA(ap1)); } else if (ap1shape != _matrix) { - cblas_daxpy(l, *((double *)PyArray_DATA(ap2)), (double *)PyArray_DATA(ap1), - ap1stride/sizeof(double), (double *)PyArray_DATA(ret), 1); + cblas_daxpy(l, + *((double *)PyArray_DATA(ap2)), + (double *)PyArray_DATA(ap1), + ap1stride/sizeof(double), + (double *)PyArray_DATA(ret), 1); } else { int maxind, oind, i, a1s, rets; @@ -666,7 +395,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa double val; maxind = (PyArray_DIM(ap1, 0) >= PyArray_DIM(ap1, 1) ? 0 : 1); - oind = 1-maxind; + oind = 1 - maxind; ptr = PyArray_DATA(ap1); rptr = PyArray_DATA(ret); l = PyArray_DIM(ap1, maxind); @@ -692,8 +421,11 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; } else if (ap1shape != _matrix) { - cblas_zaxpy(l, (double *)PyArray_DATA(ap2), (double *)PyArray_DATA(ap1), - ap1stride/sizeof(npy_cdouble), (double *)PyArray_DATA(ret), 1); + cblas_zaxpy(l, + (double *)PyArray_DATA(ap2), + (double *)PyArray_DATA(ap1), + ap1stride/sizeof(npy_cdouble), + (double *)PyArray_DATA(ret), 1); } else { int maxind, oind, i, a1s, rets; @@ -701,7 +433,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa double *pval; maxind = (PyArray_DIM(ap1, 0) >= PyArray_DIM(ap1, 1) ? 0 : 1); - oind = 1-maxind; + oind = 1 - maxind; ptr = PyArray_DATA(ap1); rptr = PyArray_DATA(ret); l = PyArray_DIM(ap1, maxind); @@ -722,8 +454,11 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa *((float *)PyArray_DATA(ap1)); } else if (ap1shape != _matrix) { - cblas_saxpy(l, *((float *)PyArray_DATA(ap2)), (float *)PyArray_DATA(ap1), - ap1stride/sizeof(float), (float *)PyArray_DATA(ret), 1); + cblas_saxpy(l, + *((float *)PyArray_DATA(ap2)), + (float *)PyArray_DATA(ap1), + ap1stride/sizeof(float), + (float *)PyArray_DATA(ret), 1); } else { int maxind, oind, i, a1s, rets; @@ -731,7 +466,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa float val; maxind = (PyArray_DIM(ap1, 0) >= PyArray_DIM(ap1, 1) ? 
0 : 1); - oind = 1-maxind; + oind = 1 - maxind; ptr = PyArray_DATA(ap1); rptr = PyArray_DATA(ret); l = PyArray_DIM(ap1, maxind); @@ -757,8 +492,11 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; } else if (ap1shape != _matrix) { - cblas_caxpy(l, (float *)PyArray_DATA(ap2), (float *)PyArray_DATA(ap1), - ap1stride/sizeof(npy_cfloat), (float *)PyArray_DATA(ret), 1); + cblas_caxpy(l, + (float *)PyArray_DATA(ap2), + (float *)PyArray_DATA(ap1), + ap1stride/sizeof(npy_cfloat), + (float *)PyArray_DATA(ret), 1); } else { int maxind, oind, i, a1s, rets; @@ -766,7 +504,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa float *pval; maxind = (PyArray_DIM(ap1, 0) >= PyArray_DIM(ap1, 1) ? 0 : 1); - oind = 1-maxind; + oind = 1 - maxind; ptr = PyArray_DATA(ap1); rptr = PyArray_DATA(ret); l = PyArray_DIM(ap1, maxind); @@ -918,7 +656,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa Py_DECREF(ap2); return PyArray_Return(ret); - fail: +fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(ret); @@ -933,65 +671,28 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa * floating point types. Like the generic NumPy equivalent the product * sum is over the last dimension of a and b. * NB: The first argument is not conjugated. + * + * This is for use by PyArray_InnerProduct. It is assumed on entry that the + * arrays ap1 and ap2 have a common data type given by typenum that is + * float, double, cfloat, or cdouble and have dimension <= 2, and have the + * contiguous flag set. The * __numpy_ufunc__ nonsense is also assumed to + * have been taken care of. */ -static PyObject * -dotblas_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) +NPY_NO_EXPORT PyObject * +cblas_innerproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2) { - PyObject *op1, *op2; - PyArrayObject *ap1, *ap2, *ret; int j, l, lda, ldb; - int typenum, nd; + int nd; + double prior1, prior2; + PyArrayObject *ret; npy_intp dimensions[NPY_MAXDIMS]; PyTypeObject *subtype; - double prior1, prior2; - - if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL; - - /* - * Inner product using the BLAS. The product sum is taken along the last - * dimensions of the two arrays. - * Only speeds things up for float double and complex types. 
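Stepping back, the branch structure shared by `cblas_matrixproduct` and `cblas_innerproduct` amounts to picking a BLAS level from the pair of dimensionalities. A rough map of that dispatch, as a descriptive sketch only; the real code also handles shape validation, output allocation and strides:

    /* Which BLAS routine handles an (nd1, nd2) operand pair; mirrors the
     * branch structure of the cblas_* product functions. */
    static const char *blas_level(int nd1, int nd2)
    {
        if (nd1 == 0 || nd2 == 0) {
            return "axpy: scale by the scalar operand (Level 1)";
        }
        if (nd1 == 1 && nd2 == 1) {
            return "dot: vector-vector product (Level 1)";
        }
        if (nd1 == 2 && nd2 == 1) {
            return "gemv: matrix-vector product (Level 2)";
        }
        if (nd1 == 1 && nd2 == 2) {
            return "gemv on the transposed matrix (Level 2)";
        }
        return "gemm: matrix-matrix product (Level 3)";
    }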
- */ - - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - /* This function doesn't handle other types */ - if ((typenum != NPY_DOUBLE && typenum != NPY_CDOUBLE && - typenum != NPY_FLOAT && typenum != NPY_CFLOAT)) { - return PyArray_Return((PyArrayObject *)PyArray_InnerProduct(op1, op2)); - } - - ret = NULL; - ap1 = (PyArrayObject *)PyArray_ContiguousFromObject(op1, typenum, 0, 0); - if (ap1 == NULL) return NULL; - ap2 = (PyArrayObject *)PyArray_ContiguousFromObject(op2, typenum, 0, 0); - if (ap2 == NULL) goto fail; - - if ((PyArray_NDIM(ap1) > 2) || (PyArray_NDIM(ap2) > 2)) { - /* This function doesn't handle dimensions greater than 2 -- other - than to ensure the dot function is altered - */ - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) { /* One of ap1 or ap2 is a scalar */ - if (PyArray_NDIM(ap1) == 0) { /* Make ap2 the scalar */ + if (PyArray_NDIM(ap1) == 0) { + /* Make ap2 the scalar */ PyArrayObject *t = ap1; ap1 = ap2; ap2 = t; @@ -1002,18 +703,23 @@ dotblas_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) } nd = PyArray_NDIM(ap1); } - else { /* (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2) */ - /* Both ap1 and ap2 are vectors or matrices */ - l = PyArray_DIM(ap1, PyArray_NDIM(ap1)-1); + else { + /* + * (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2) + * Both ap1 and ap2 are vectors or matrices + */ + l = PyArray_DIM(ap1, PyArray_NDIM(ap1) - 1); - if (PyArray_DIM(ap2, PyArray_NDIM(ap2)-1) != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); + if (PyArray_DIM(ap2, PyArray_NDIM(ap2) - 1) != l) { + dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1, + ap2, PyArray_NDIM(ap2) - 1); goto fail; } - nd = PyArray_NDIM(ap1)+PyArray_NDIM(ap2)-2; + nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2; if (nd == 1) - dimensions[0] = (PyArray_NDIM(ap1) == 2) ? PyArray_DIM(ap1, 0) : PyArray_DIM(ap2, 0); + dimensions[0] = (PyArray_NDIM(ap1) == 2) ? + PyArray_DIM(ap1, 0) : PyArray_DIM(ap2, 0); else if (nd == 2) { dimensions[0] = PyArray_DIM(ap1, 0); dimensions[1] = PyArray_DIM(ap2, 0); @@ -1027,36 +733,49 @@ dotblas_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, typenum, NULL, NULL, 0, 0, - (PyObject *)\ - (prior2 > prior1 ? ap2 : ap1)); + (PyObject *) + (prior2 > prior1 ? 
ap2 : ap1)); + + if (ret == NULL) { + goto fail; + } - if (ret == NULL) goto fail; - NPY_BEGIN_ALLOW_THREADS + NPY_BEGIN_ALLOW_THREADS; memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); if (PyArray_NDIM(ap2) == 0) { /* Multiplication by a scalar -- Level 1 BLAS */ if (typenum == NPY_DOUBLE) { - cblas_daxpy(l, *((double *)PyArray_DATA(ap2)), (double *)PyArray_DATA(ap1), 1, + cblas_daxpy(l, + *((double *)PyArray_DATA(ap2)), + (double *)PyArray_DATA(ap1), 1, (double *)PyArray_DATA(ret), 1); } else if (typenum == NPY_CDOUBLE) { - cblas_zaxpy(l, (double *)PyArray_DATA(ap2), (double *)PyArray_DATA(ap1), 1, + cblas_zaxpy(l, + (double *)PyArray_DATA(ap2), + (double *)PyArray_DATA(ap1), 1, (double *)PyArray_DATA(ret), 1); } else if (typenum == NPY_FLOAT) { - cblas_saxpy(l, *((float *)PyArray_DATA(ap2)), (float *)PyArray_DATA(ap1), 1, + cblas_saxpy(l, + *((float *)PyArray_DATA(ap2)), + (float *)PyArray_DATA(ap1), 1, (float *)PyArray_DATA(ret), 1); } else if (typenum == NPY_CFLOAT) { - cblas_caxpy(l, (float *)PyArray_DATA(ap2), (float *)PyArray_DATA(ap1), 1, + cblas_caxpy(l, + (float *)PyArray_DATA(ap2), + (float *)PyArray_DATA(ap1), 1, (float *)PyArray_DATA(ret), 1); } } else if (PyArray_NDIM(ap1) == 1 && PyArray_NDIM(ap2) == 1) { /* Dot product between two vectors -- Level 1 BLAS */ - blas_dot(typenum, l, PyArray_DATA(ap1), PyArray_ITEMSIZE(ap1), - PyArray_DATA(ap2), PyArray_ITEMSIZE(ap2), PyArray_DATA(ret)); + blas_dot(typenum, l, + PyArray_DATA(ap1), PyArray_ITEMSIZE(ap1), + PyArray_DATA(ap2), PyArray_ITEMSIZE(ap2), + PyArray_DATA(ret)); } else if (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 1) { /* Matrix-vector multiplication -- Level 2 BLAS */ @@ -1068,115 +787,17 @@ dotblas_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) lda = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1); gemv(typenum, CblasRowMajor, CblasNoTrans, ap2, lda, ap1, 1, ret); } - else { /* (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 2) */ - /* Matrix matrix multiplication -- Level 3 BLAS */ + else { + /* + * (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 2) + * Matrix matrix multiplication -- Level 3 BLAS + */ lda = (PyArray_DIM(ap1, 1) > 1 ? PyArray_DIM(ap1, 1) : 1); ldb = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1); gemm(typenum, CblasRowMajor, CblasNoTrans, CblasTrans, PyArray_DIM(ap1, 0), PyArray_DIM(ap2, 0), PyArray_DIM(ap1, 1), ap1, lda, ap2, ldb, ret); } - NPY_END_ALLOW_THREADS - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -/* - * vdot(a,b) - * - * Returns the dot product of a and b for scalars and vectors of - * floating point and complex types. The first argument, a, is conjugated. - */ -static PyObject *dotblas_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) { - PyObject *op1, *op2; - PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL; - int l; - int typenum; - npy_intp dimensions[NPY_MAXDIMS]; - PyArray_Descr *type; - - if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL; - - /* - * Conjugating dot product using the BLAS for vectors. - * Multiplies op1 and op2, each of which must be vector. 
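The removed `dotblas_vdot` conjugates its first operand, i.e. it computes the sum over i of conj(a_i) * b_i, which is what `cblas_zdotc_sub` does in a single call. A standalone C99 equivalent with a worked value:

    #include <complex.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Conjugating dot product: sum of conj(a[i]) * b[i], as in vdot. */
    static double complex vdotc(ptrdiff_t n, const double complex *a,
                                const double complex *b)
    {
        double complex sum = 0.0;
        ptrdiff_t i;
        for (i = 0; i < n; i++) {
            sum += conj(a[i]) * b[i];
        }
        return sum;
    }

    int main(void)
    {
        double complex a[2] = { 1.0 + 2.0*I, 3.0 - 1.0*I };
        double complex b[2] = { 2.0 + 0.0*I, 1.0 + 1.0*I };
        double complex r = vdotc(2, a, b);
        /* conj(1+2i)*2 + conj(3-i)*(1+i) = (2-4i) + (2+4i) = 4+0i */
        printf("%.1f%+.1fi\n", creal(r), cimag(r));
        return 0;
    }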
- */ - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - type = PyArray_DescrFromType(typenum); - Py_INCREF(type); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0, NULL); - if (ap1==NULL) {Py_DECREF(type); goto fail;} - op1 = PyArray_Flatten(ap1, 0); - if (op1==NULL) {Py_DECREF(type); goto fail;} - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - - ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0, NULL); - if (ap2==NULL) goto fail; - op2 = PyArray_Flatten(ap2, 0); - if (op2 == NULL) goto fail; - Py_DECREF(ap2); - ap2 = (PyArrayObject *)op2; - - if (typenum != NPY_FLOAT && typenum != NPY_DOUBLE && - typenum != NPY_CFLOAT && typenum != NPY_CDOUBLE) { - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - if (PyTypeNum_ISCOMPLEX(typenum)) { - op1 = PyArray_Conjugate(ap1, NULL); - if (op1==NULL) goto fail; - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - } - ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (PyArray_DIM(ap2, 0) != PyArray_DIM(ap1, PyArray_NDIM(ap1)-1)) { - PyErr_SetString(PyExc_ValueError, "vectors have different lengths"); - goto fail; - } - l = PyArray_DIM(ap1, PyArray_NDIM(ap1)-1); - - ret = (PyArrayObject *)PyArray_SimpleNew(0, dimensions, typenum); - if (ret == NULL) goto fail; - - NPY_BEGIN_ALLOW_THREADS; - - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == NPY_DOUBLE || typenum == NPY_FLOAT) { - blas_dot(typenum, l, PyArray_DATA(ap1), PyArray_ITEMSIZE(ap1), - PyArray_DATA(ap2), PyArray_ITEMSIZE(ap2), PyArray_DATA(ret)); - } - else if (typenum == NPY_CDOUBLE) { - cblas_zdotc_sub(l, (double *)PyArray_DATA(ap1), 1, - (double *)PyArray_DATA(ap2), 1, (double *)PyArray_DATA(ret)); - } - else if (typenum == NPY_CFLOAT) { - cblas_cdotc_sub(l, (float *)PyArray_DATA(ap1), 1, - (float *)PyArray_DATA(ap2), 1, (float *)PyArray_DATA(ret)); - } - NPY_END_ALLOW_THREADS; Py_DECREF(ap1); @@ -1189,65 +810,3 @@ static PyObject *dotblas_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) { Py_XDECREF(ret); return NULL; } - -static struct PyMethodDef dotblas_module_methods[] = { - {"dot", (PyCFunction)dotblas_matrixproduct, METH_VARARGS|METH_KEYWORDS, NULL}, - {"inner", (PyCFunction)dotblas_innerproduct, 1, NULL}, - {"vdot", (PyCFunction)dotblas_vdot, 1, NULL}, - {"alterdot", (PyCFunction)dotblas_alterdot, 1, NULL}, - {"restoredot", (PyCFunction)dotblas_restoredot, 1, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_dotblas", - NULL, - -1, - dotblas_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if defined(NPY_PY3K) -#define RETVAL m -PyMODINIT_FUNC PyInit__dotblas(void) -#else -#define RETVAL -PyMODINIT_FUNC init_dotblas(void) -#endif -{ -#if defined(NPY_PY3K) - int i; - - PyObject *d, *s, *m; - m = PyModule_Create(&moduledef); -#else - int i; - - PyObject *d, *s; - Py_InitModule3("_dotblas", dotblas_module_methods, module_doc); -#endif - - /* add the functions */ - - /* Import the array object */ - import_array(); - - /* Initialise the array of dot functions */ - for (i = 0; i < NPY_NTYPES; i++) - oldFunctions[i] = NULL; - - /* alterdot at load */ - d = PyTuple_New(0); - s = dotblas_alterdot(NULL, d); - Py_DECREF(d); - 
Py_DECREF(s); - - return RETVAL; -} diff --git a/numpy/core/src/multiarray/cblasfuncs.h b/numpy/core/src/multiarray/cblasfuncs.h new file mode 100644 index 000000000..d3ec08db6 --- /dev/null +++ b/numpy/core/src/multiarray/cblasfuncs.h @@ -0,0 +1,10 @@ +#ifndef _NPY_CBLASFUNCS_H_ +#define _NPY_CBLASFUNCS_H_ + +NPY_NO_EXPORT PyObject * +cblas_matrixproduct(int, PyArrayObject *, PyArrayObject *, PyArrayObject *); + +NPY_NO_EXPORT PyObject * +cblas_innerproduct(int, PyArrayObject *, PyArrayObject *); + +#endif diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 2b3d3c3d2..816778b91 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -84,7 +84,7 @@ PyArray_GetAttrString_SuppressException(PyObject *obj, char *name) -NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND; +NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_SAME_KIND_CASTING; NPY_NO_EXPORT PyArray_Descr * @@ -518,12 +518,20 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims, return 0; } + /* + * fails if convertable to list but no len is defined which some libraries + * require to get object arrays + */ + size = PySequence_Size(obj); + if (size < 0) { + goto fail; + } + /* Recursive case, first check the sequence contains only one type */ seq = PySequence_Fast(obj, "Could not convert object to sequence"); if (seq == NULL) { goto fail; } - size = PySequence_Fast_GET_SIZE(seq); objects = PySequence_Fast_ITEMS(seq); common_type = size > 0 ? Py_TYPE(objects[0]) : NULL; for (i = 1; i < size; ++i) { @@ -676,7 +684,7 @@ _IsAligned(PyArrayObject *ap) /* alignment 1 types should have a efficient alignment for copy loops */ if (PyArray_ISFLEXIBLE(ap) || PyArray_ISSTRING(ap)) { - alignment = 16; + alignment = NPY_MAX_COPY_ALIGNMENT; } if (alignment == 1) { @@ -787,7 +795,7 @@ offset_bounds_from_strides(const int itemsize, const int nd, * @param Dimensionality of the shape * @param npy_intp pointer to shape array * @param String to append after the shape `(1, 2)%s`. 
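`convert_shape_to_string`, documented above, builds the `(d1, d2, ...)%s` text used in error messages, including Python's trailing comma for 1-tuples. A plain-C sketch with snprintf instead of the CPython string API (the buffer is assumed large enough for the result; names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    /* Format a shape as "(3, 4)ending"; a 1-d shape becomes "(3,)ending"
     * to match Python's tuple repr. Assumes buf can hold the result. */
    static void shape_to_string(int ndim, const ptrdiff_t *dims,
                                const char *ending, char *buf, size_t size)
    {
        size_t pos = (size_t)snprintf(buf, size, "(");
        int i;
        for (i = 0; i < ndim; i++) {
            pos += (size_t)snprintf(buf + pos, size - pos, "%ld%s",
                                    (long)dims[i], i + 1 < ndim ? ", " : "");
        }
        snprintf(buf + pos, size - pos, "%s)%s",
                 ndim == 1 ? "," : "", ending);
    }

Called with ndim 2 and dims {3, 4} it produces "(3, 4)"; with ndim 1 and dims {3} it produces "(3,)", which is what the dot_alignment_error message below interpolates.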
- * + * * @return Python unicode string */ NPY_NO_EXPORT PyObject * @@ -836,7 +844,59 @@ convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending) } else { tmp = PyUString_FromFormat(")%s", ending); - } + } PyUString_ConcatAndDel(&ret, tmp); return ret; } + + +NPY_NO_EXPORT void +dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j) +{ + PyObject *errmsg = NULL, *format = NULL, *fmt_args = NULL, + *i_obj = NULL, *j_obj = NULL, + *shape1 = NULL, *shape2 = NULL, + *shape1_i = NULL, *shape2_j = NULL; + + format = PyUString_FromString("shapes %s and %s not aligned:" + " %d (dim %d) != %d (dim %d)"); + + shape1 = convert_shape_to_string(PyArray_NDIM(a), PyArray_DIMS(a), ""); + shape2 = convert_shape_to_string(PyArray_NDIM(b), PyArray_DIMS(b), ""); + + i_obj = PyLong_FromLong(i); + j_obj = PyLong_FromLong(j); + + shape1_i = PyLong_FromSsize_t(PyArray_DIM(a, i)); + shape2_j = PyLong_FromSsize_t(PyArray_DIM(b, j)); + + if (!format || !shape1 || !shape2 || !i_obj || !j_obj || + !shape1_i || !shape2_j) { + goto end; + } + + fmt_args = PyTuple_Pack(6, shape1, shape2, + shape1_i, i_obj, shape2_j, j_obj); + if (fmt_args == NULL) { + goto end; + } + + errmsg = PyUString_Format(format, fmt_args); + if (errmsg != NULL) { + PyErr_SetObject(PyExc_ValueError, errmsg); + } + else { + PyErr_SetString(PyExc_ValueError, "shapes are not aligned"); + } + +end: + Py_XDECREF(errmsg); + Py_XDECREF(fmt_args); + Py_XDECREF(format); + Py_XDECREF(i_obj); + Py_XDECREF(j_obj); + Py_XDECREF(shape1); + Py_XDECREF(shape2); + Py_XDECREF(shape1_i); + Py_XDECREF(shape2_j); +} diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 6b49d6b4c..9cf2e27bf 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -3,6 +3,7 @@ #include <numpy/npy_common.h> #include <numpy/npy_cpu.h> #include <numpy/ndarraytypes.h> +#include <limits.h> #define error_converting(x) (((x) == -1) && PyErr_Occurred()) @@ -72,6 +73,13 @@ offset_bounds_from_strides(const int itemsize, const int nd, NPY_NO_EXPORT PyObject * convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending); +/* + * Sets ValueError with "matrices not aligned" message for np.dot and friends + * when a.shape[i] should match b.shape[j], but doesn't. + */ +NPY_NO_EXPORT void +dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); + /* * Returns -1 and sets an exception if *index is an invalid index for @@ -208,6 +216,35 @@ _is_basic_python_type(PyObject * obj) return 0; } +/* + * Convert NumPy stride to BLAS stride. Returns 0 if conversion cannot be done + * (BLAS won't handle negative or zero strides the way we want). + */ +static NPY_INLINE int +blas_stride(npy_intp stride, unsigned itemsize) +{ + /* + * Should probably check pointer alignment also, but this may cause + * problems if we require complex to be 16 byte aligned. + */ + if (stride > 0 && npy_is_aligned((void *)stride, itemsize)) { + stride /= itemsize; + if (stride <= INT_MAX) { + return stride; + } + } + return 0; +} + +/* + * Define a chunksize for CBLAS. CBLAS counts in integers. 
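`blas_stride` above is the gatekeeper for all of the CBLAS fast paths: a byte stride is usable only if it converts to a positive element stride that fits in the `int` arguments CBLAS takes. A simplified standalone version, using `%` where the original uses `npy_is_aligned` (equivalent for power-of-two itemsizes):

    #include <limits.h>
    #include <stddef.h>

    /* Convert a byte stride to a BLAS element stride; returns 0 when BLAS
     * cannot be used: non-positive strides, strides that are not a whole
     * number of elements, or strides too large for BLAS's int arguments. */
    static int to_blas_stride(ptrdiff_t byte_stride, ptrdiff_t itemsize)
    {
        if (byte_stride > 0 && byte_stride % itemsize == 0) {
            byte_stride /= itemsize;
            if (byte_stride <= INT_MAX) {
                return (int)byte_stride;
            }
        }
        return 0;
    }

A return of 0 is what sends `@name@_dot` down its plain C fallback loop rather than through `cblas_@prefix@dot`.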
+ */ +#if NPY_MAX_INTP > INT_MAX +# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1) +#else +# define NPY_CBLAS_CHUNK NPY_MAX_INTP +#endif + #include "ucsnarrow.h" #endif diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index b84dff864..096a363f1 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -16,6 +16,11 @@ #include "conversion_utils.h" +static int +PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2); +static npy_intp +PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2); + /**************************************************************** * Useful function for conversion when used with PyArg_ParseTuple ****************************************************************/ @@ -111,7 +116,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) } if (len > NPY_MAXDIMS) { PyErr_Format(PyExc_ValueError, "sequence too large; " - "must be smaller than %d", NPY_MAXDIMS); + "cannot be greater than %d", NPY_MAXDIMS); return NPY_FAIL; } if (len > 0) { @@ -215,8 +220,9 @@ PyArray_AxisConverter(PyObject *obj, int *axis) *axis = NPY_MAXDIMS; } else { - *axis = PyArray_PyIntAsInt(obj); - if (PyErr_Occurred()) { + *axis = PyArray_PyIntAsInt_ErrMsg(obj, + "an integer is required for the axis"); + if (error_converting(*axis)) { return NPY_FAIL; } } @@ -251,7 +257,8 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) } for (i = 0; i < naxes; ++i) { PyObject *tmp = PyTuple_GET_ITEM(axis_in, i); - int axis = PyArray_PyIntAsInt(tmp); + int axis = PyArray_PyIntAsInt_ErrMsg(tmp, + "integers are required for the axis tuple elements"); int axis_orig = axis; if (error_converting(axis)) { return NPY_FAIL; @@ -281,7 +288,8 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) memset(out_axis_flags, 0, ndim); - axis = PyArray_PyIntAsInt(axis_in); + axis = PyArray_PyIntAsInt_ErrMsg(axis_in, + "an integer is required for the axis"); axis_orig = axis; if (error_converting(axis)) { @@ -689,7 +697,7 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) return ret; } - if (PyBytes_AsStringAndSize(obj, &str, &length) == -1) { + if (PyBytes_AsStringAndSize(obj, &str, &length) < 0) { return 0; } @@ -736,13 +744,12 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) * Other conversion functions *****************************/ -/*NUMPY_API*/ -NPY_NO_EXPORT int -PyArray_PyIntAsInt(PyObject *o) +static int +PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) { npy_intp long_value; /* This assumes that NPY_SIZEOF_INTP >= NPY_SIZEOF_INT */ - long_value = PyArray_PyIntAsIntp(o); + long_value = PyArray_PyIntAsIntp_ErrMsg(o, msg); #if (NPY_SIZEOF_INTP > NPY_SIZEOF_INT) if ((long_value < INT_MIN) || (long_value > INT_MAX)) { @@ -754,8 +761,14 @@ PyArray_PyIntAsInt(PyObject *o) } /*NUMPY_API*/ -NPY_NO_EXPORT npy_intp -PyArray_PyIntAsIntp(PyObject *o) +NPY_NO_EXPORT int +PyArray_PyIntAsInt(PyObject *o) +{ + return PyArray_PyIntAsInt_ErrMsg(o, "an integer is required"); +} + +static npy_intp +PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg) { #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP) long long long_value = -1; @@ -763,7 +776,6 @@ PyArray_PyIntAsIntp(PyObject *o) long long_value = -1; #endif PyObject *obj, *err; - static char *msg = "an integer is required"; if (!o) { PyErr_SetString(PyExc_TypeError, msg); @@ -909,6 +921,13 @@ PyArray_PyIntAsIntp(PyObject *o) return long_value; } 
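`PyArray_PyIntAsInt_ErrMsg` above narrows the pointer-sized result of the intp conversion to `int`, rejecting values outside the int range on platforms where the two differ. The same narrowing in standalone C, with an error code in place of the Python exception:

    #include <limits.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Narrow a pointer-sized integer to int. Returns 0 on success, -1 when
     * the value does not fit (the caller reports "integer out of range"). */
    static int narrow_to_int(ptrdiff_t value, int *out)
    {
    #if PTRDIFF_MAX > INT_MAX
        if (value < INT_MIN || value > INT_MAX) {
            return -1;
        }
    #endif
        *out = (int)value;
        return 0;
    }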
+/*NUMPY_API*/ +NPY_NO_EXPORT npy_intp +PyArray_PyIntAsIntp(PyObject *o) +{ + return PyArray_PyIntAsIntp_ErrMsg(o, "an integer is required"); +} + /* * PyArray_IntpFromIndexSequence diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 1db3bfe85..fa5fb6b67 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -624,63 +624,19 @@ type_num_unsigned_to_signed(int type_num) } } -/* - * NOTE: once the UNSAFE_CASTING -> SAME_KIND_CASTING transition is over, - * we should remove NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND - * and PyArray_CanCastTypeTo_impl should be renamed back to - * PyArray_CanCastTypeTo. - */ -static npy_bool -PyArray_CanCastTypeTo_impl(PyArray_Descr *from, PyArray_Descr *to, - NPY_CASTING casting); - /*NUMPY_API * Returns true if data of type 'from' may be cast to data of type * 'to' according to the rule 'casting'. */ NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, - NPY_CASTING casting) -{ - /* fast path for basic types */ - if (NPY_LIKELY(from->type_num < NPY_OBJECT) && - NPY_LIKELY(from->type_num == to->type_num) && - NPY_LIKELY(from->byteorder == to->byteorder)) { - return 1; - } - else if (casting == NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND) { - npy_bool unsafe_ok, same_kind_ok; - unsafe_ok = PyArray_CanCastTypeTo_impl(from, to, NPY_UNSAFE_CASTING); - same_kind_ok = PyArray_CanCastTypeTo_impl(from, to, - NPY_SAME_KIND_CASTING); - if (unsafe_ok && !same_kind_ok) { - char * msg = "Implicitly casting between incompatible kinds. In " - "a future numpy release, this will raise an error. " - "Use casting=\"unsafe\" if this is intentional."; - if (DEPRECATE(msg) < 0) { - /* We have no way to propagate an exception :-( */ - PyErr_Clear(); - PySys_WriteStderr("Sorry, you requested this warning " - "be raised as an error, but we couldn't " - "do it. (See issue #3806 in the numpy " - "bug tracker.) 
So FYI, it was: " - "DeprecationWarning: %s\n", - msg); - } - } - return unsafe_ok; - } - else { - return PyArray_CanCastTypeTo_impl(from, to, casting); - } -} - -static npy_bool -PyArray_CanCastTypeTo_impl(PyArray_Descr *from, PyArray_Descr *to, NPY_CASTING casting) { - /* If unsafe casts are allowed */ - if (casting == NPY_UNSAFE_CASTING) { + /* Fast path for unsafe casts or basic types */ + if (casting == NPY_UNSAFE_CASTING || + (NPY_LIKELY(from->type_num < NPY_OBJECT) && + NPY_LIKELY(from->type_num == to->type_num) && + NPY_LIKELY(from->byteorder == to->byteorder))) { return 1; } /* Equivalent types can be cast with any value of 'casting' */ diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index d93995c8a..c57df147a 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -12,6 +12,7 @@ #include "npy_config.h" #include "npy_pycompat.h" +#include "multiarraymodule.h" #include "common.h" #include "ctors.h" @@ -1054,12 +1055,12 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd, fa->data = data; /* - * If the strides were provided to the function, need to - * update the flags to get the right CONTIGUOUS, ALIGN properties + * always update the flags to get the right CONTIGUOUS, ALIGN properties + * not owned data and input strides may not be aligned and on some + * platforms (debian sparc) malloc does not provide enough alignment for + * long double types */ - if (strides != NULL) { - PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_UPDATE_ALL); - } + PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_UPDATE_ALL); /* * call the __array_finalize__ @@ -1069,7 +1070,7 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd, if ((subtype != &PyArray_Type)) { PyObject *res, *func, *args; - func = PyObject_GetAttrString((PyObject *)fa, "__array_finalize__"); + func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize); if (func && func != Py_None) { if (NpyCapsule_Check(func)) { /* A C-function is stored here */ @@ -3368,7 +3369,7 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, #endif ) { PyObject *newbuf; - newbuf = PyObject_GetAttrString(buf, "__buffer__"); + newbuf = PyObject_GetAttr(buf, npy_ma_str_buffer); if (newbuf == NULL) { Py_DECREF(type); return NULL; diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index cd52756b9..a870650fc 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -2373,7 +2373,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, bytes = obj; Py_INCREF(bytes); } - if (PyBytes_AsStringAndSize(bytes, &str, &len) == -1) { + if (PyBytes_AsStringAndSize(bytes, &str, &len) < 0) { Py_DECREF(bytes); return -1; } @@ -2563,7 +2563,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, bytes = obj; Py_INCREF(bytes); } - if (PyBytes_AsStringAndSize(bytes, &str, &len) == -1) { + if (PyBytes_AsStringAndSize(bytes, &str, &len) < 0) { Py_DECREF(bytes); return -1; } diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index 8b55c9fbd..9cf66020d 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -2369,11 +2369,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args) { int elsize = -1, alignment = -1; int version = 4; -#if defined(NPY_PY3K) - int endian; -#else char endian; -#endif + PyObject *endian_obj; PyObject *subarray, 
*fields, *names = NULL, *metadata=NULL; int incref_names = 1; int int_dtypeflags = 0; @@ -2390,68 +2387,39 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args) } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { case 9: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOOiiiO)" -#else -#define _ARGSTR_ "(icOOOiiiO)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, &endian, + if (!PyArg_ParseTuple(args, "(iOOOOiiiO)", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment, &int_dtypeflags, &metadata)) { + PyErr_Clear(); return NULL; -#undef _ARGSTR_ } break; case 8: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOOiii)" -#else -#define _ARGSTR_ "(icOOOiii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, &endian, + if (!PyArg_ParseTuple(args, "(iOOOOiii)", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment, &int_dtypeflags)) { return NULL; -#undef _ARGSTR_ } break; case 7: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOOii)" -#else -#define _ARGSTR_ "(icOOOii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, &endian, + if (!PyArg_ParseTuple(args, "(iOOOOii)", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment)) { return NULL; -#undef _ARGSTR_ } break; case 6: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOii)" -#else -#define _ARGSTR_ "(icOOii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, - &endian, &subarray, &fields, + if (!PyArg_ParseTuple(args, "(iOOOii)", &version, + &endian_obj, &subarray, &fields, &elsize, &alignment)) { - PyErr_Clear(); -#undef _ARGSTR_ + return NULL; } break; case 5: version = 0; -#if defined(NPY_PY3K) -#define _ARGSTR_ "(COOii)" -#else -#define _ARGSTR_ "(cOOii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, - &endian, &subarray, &fields, &elsize, + if (!PyArg_ParseTuple(args, "(OOOii)", + &endian_obj, &subarray, &fields, &elsize, &alignment)) { -#undef _ARGSTR_ return NULL; } break; @@ -2494,11 +2462,55 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args) } } + /* Parse endian */ + if (PyUnicode_Check(endian_obj) || PyBytes_Check(endian_obj)) { + PyObject *tmp = NULL; + char *str; + Py_ssize_t len; + + if (PyUnicode_Check(endian_obj)) { + tmp = PyUnicode_AsASCIIString(endian_obj); + if (tmp == NULL) { + return NULL; + } + endian_obj = tmp; + } + + if (PyBytes_AsStringAndSize(endian_obj, &str, &len) < 0) { + Py_XDECREF(tmp); + return NULL; + } + if (len != 1) { + PyErr_SetString(PyExc_ValueError, + "endian is not 1-char string in Numpy dtype unpickling"); + Py_XDECREF(tmp); + return NULL; + } + endian = str[0]; + Py_XDECREF(tmp); + } + else { + PyErr_SetString(PyExc_ValueError, + "endian is not a string in Numpy dtype unpickling"); + return NULL; + } if ((fields == Py_None && names != Py_None) || (names == Py_None && fields != Py_None)) { PyErr_Format(PyExc_ValueError, - "inconsistent fields and names"); + "inconsistent fields and names in Numpy dtype unpickling"); + return NULL; + } + + if (names != Py_None && !PyTuple_Check(names)) { + PyErr_Format(PyExc_ValueError, + "non-tuple names in Numpy dtype unpickling"); + return NULL; + } + + if (fields != Py_None && !PyDict_Check(fields)) { + PyErr_Format(PyExc_ValueError, + "non-dict fields in Numpy dtype unpickling"); return NULL; } @@ -2563,13 +2575,82 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args) } if (fields != Py_None) { - Py_XDECREF(self->fields); - self->fields = fields; - Py_INCREF(fields); - Py_XDECREF(self->names); - self->names = names; - if (incref_names) { - Py_INCREF(names); + 
/* + * Ensure names are of appropriate string type + */ + Py_ssize_t i; + int names_ok = 1; + PyObject *name; + + for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { + name = PyTuple_GET_ITEM(names, i); + if (!PyUString_Check(name)) { + names_ok = 0; + break; + } + } + + if (names_ok) { + Py_XDECREF(self->fields); + self->fields = fields; + Py_INCREF(fields); + Py_XDECREF(self->names); + self->names = names; + if (incref_names) { + Py_INCREF(names); + } + } + else { +#if defined(NPY_PY3K) + /* + * To support pickle.load(f, encoding='bytes') for loading Py2 + * generated pickles on Py3, we need to be more lenient and convert + * field names from byte strings to unicode. + */ + PyObject *tmp, *new_name, *field; + + tmp = PyDict_New(); + if (tmp == NULL) { + return NULL; + } + Py_XDECREF(self->fields); + self->fields = tmp; + + tmp = PyTuple_New(PyTuple_GET_SIZE(names)); + if (tmp == NULL) { + return NULL; + } + Py_XDECREF(self->names); + self->names = tmp; + + for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { + name = PyTuple_GET_ITEM(names, i); + field = PyDict_GetItem(fields, name); + if (!field) { + return NULL; + } + + if (PyUnicode_Check(name)) { + new_name = name; + Py_INCREF(new_name); + } + else { + new_name = PyUnicode_FromEncodedObject(name, "ASCII", "strict"); + if (new_name == NULL) { + return NULL; + } + } + + PyTuple_SET_ITEM(self->names, i, new_name); + if (PyDict_SetItem(self->fields, new_name, field) != 0) { + return NULL; + } + } +#else + PyErr_Format(PyExc_ValueError, + "non-string names in Numpy dtype unpickling"); + return NULL; +#endif } } diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index b2bf17f4c..cd0ae1680 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -265,6 +265,11 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, "put: first argument must be an array"); return NULL; } + + if (PyArray_FailUnlessWriteable(self, "put: output array") < 0) { + return NULL; + } + if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY; diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index b9063273f..38e7656f3 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -1490,7 +1490,9 @@ mapiter_@name@(PyArrayMapIterObject *mit) /* Constant information */ npy_intp fancy_dims[NPY_MAXDIMS]; npy_intp fancy_strides[NPY_MAXDIMS]; +#if @isget@ int iteraxis; +#endif char *baseoffset = mit->baseoffset; char **outer_ptrs = mit->outer_ptrs; @@ -1498,7 +1500,9 @@ mapiter_@name@(PyArrayMapIterObject *mit) PyArrayObject *array= mit->array; /* Fill constant information */ +#if @isget@ iteraxis = mit->iteraxes[0]; +#endif for (i = 0; i < numiter; i++) { fancy_dims[i] = mit->fancy_dims[i]; fancy_strides[i] = mit->fancy_strides[i]; diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index e2b8ef700..40622ca61 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -206,14 +206,17 @@ prepare_index(PyArrayObject *self, PyObject *index, n = 0; make_tuple = 1; } - n = PySequence_Size(index); + else { + n = PySequence_Size(index); + } if (n < 0 || n >= NPY_MAXDIMS) { n = 0; } for (i = 0; i < n; i++) { PyObject *tmp_obj = PySequence_GetItem(index, i); if (tmp_obj == NULL) { - make_tuple = 1; + 
PyErr_Clear(); + make_tuple = 0; break; } if (PyArray_Check(tmp_obj) || PySequence_Check(tmp_obj) @@ -1047,7 +1050,7 @@ array_boolean_subscript(PyArrayObject *self, Py_INCREF(dtype); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), dtype, 1, &size, PyArray_STRIDES(ret), PyArray_BYTES(ret), - 0, (PyObject *)self); + PyArray_FLAGS(self), (PyObject *)self); if (ret == NULL) { Py_DECREF(tmp); @@ -1221,7 +1224,7 @@ array_assign_boolean_subscript(PyArrayObject *self, if (needs_api) { /* - * FIXME?: most assignment operations stop after the first occurance + * FIXME?: most assignment operations stop after the first occurrence * of an error. Boolean does not currently, but should at least * report the error. (This is only relevant for things like str->int * casts which call into python) @@ -1436,7 +1439,7 @@ array_subscript(PyArrayObject *self, PyObject *op) /* * TODO: Should this be a view or not? The only reason not would be * optimization (i.e. of array[...] += 1) I think. - * Before, it was just self for a single Ellipis. + * Before, it was just self for a single ellipsis. */ result = PyArray_View(self, NULL, NULL); /* A single ellipsis, so no need to decref */ @@ -1569,7 +1572,7 @@ array_subscript(PyArrayObject *self, PyObject *op) PyArray_SHAPE(tmp_arr), PyArray_STRIDES(tmp_arr), PyArray_BYTES(tmp_arr), - 0, /* TODO: Flags? */ + PyArray_FLAGS(self), (PyObject *)self); if (result == NULL) { @@ -1656,6 +1659,58 @@ array_assign_item(PyArrayObject *self, Py_ssize_t i, PyObject *op) /* + * This fallback takes the old route of `arr.flat[index] = values` + * for one dimensional `arr`. The route can sometimes fail slightly + * differently (ValueError instead of IndexError), in which case we + * warn users about the change. But since it does not actually care *at all* + * about shapes, it should only fail for out of bound indexes or + * casting errors. + */ +NPY_NO_EXPORT int +attempt_1d_fallback(PyArrayObject *self, PyObject *ind, PyObject *op) +{ + PyObject *err = PyErr_Occurred(); + PyArrayIterObject *self_iter = NULL; + + Py_INCREF(err); + PyErr_Clear(); + + self_iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); + if (self_iter == NULL) { + goto fail; + } + if (iter_ass_subscript(self_iter, ind, op) < 0) { + goto fail; + } + + Py_XDECREF((PyObject *)self_iter); + Py_DECREF(err); + + if (DEPRECATE( + "assignment will raise an error in the future, most likely " + "because your index result shape does not match the value array " + "shape. You can use `arr.flat[index] = values` to keep the old " + "behaviour.") < 0) { + return -1; + } + return 0; + + fail: + if (!PyErr_ExceptionMatches(err)) { + PyObject *err, *val, *tb; + PyErr_Fetch(&err, &val, &tb); + DEPRECATE_FUTUREWARNING( + "assignment exception type will change in the future"); + PyErr_Restore(err, val, tb); + } + + Py_XDECREF((PyObject *)self_iter); + Py_DECREF(err); + return -1; +} + + +/* * General assignment with python indexing objects. */ static int @@ -1746,9 +1801,21 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) Py_INCREF(op); tmp_arr = (PyArrayObject *)op; } + if (array_assign_boolean_subscript(self, (PyArrayObject *)indices[0].object, tmp_arr, NPY_CORDER) < 0) { + /* + * Deprecated case. The old boolean indexing seemed to have some + * check to allow wrong dimensional boolean arrays in all cases. 
+ */ + if (PyArray_NDIM(tmp_arr) > 1) { + if (attempt_1d_fallback(self, indices[0].object, + (PyObject*)tmp_arr) < 0) { + goto fail; + } + goto success; + } goto fail; } goto success; @@ -1899,14 +1966,36 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) tmp_arr, descr); if (mit == NULL) { - goto fail; + /* + * This is a deprecated special case to allow non-matching shapes + * for the index and value arrays. + */ + if (index_type != HAS_FANCY || index_num != 1) { + /* This is not a "flat like" 1-d special case */ + goto fail; + } + if (attempt_1d_fallback(self, indices[0].object, op) < 0) { + goto fail; + } + goto success; } if (tmp_arr == NULL) { /* Fill extra op */ if (PyArray_CopyObject(mit->extra_op, op) < 0) { - goto fail; + /* + * This is a deprecated special case to allow non-matching shapes + * for the index and value arrays. + */ + if (index_type != HAS_FANCY || index_num != 1) { + /* This is not a "flat like" 1-d special case */ + goto fail; + } + if (attempt_1d_fallback(self, indices[0].object, op) < 0) { + goto fail; + } + goto success; } } @@ -2357,7 +2446,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) NPY_BEGIN_THREADS_DEF; if (mit->size == 0) { - /* All indices got broadcasted away, do *not* check as it always was */ + /* All indices got broadcast away, do *not* check as it always was */ return 0; } @@ -2580,7 +2669,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, * 1. No subspace iteration is necessary, so the extra_op can * be included into the index iterator (it will be buffered) * 2. Subspace iteration is necessary, so the extra op is iterated - * independendly, and the iteration order is fixed at C (could + * independently, and the iteration order is fixed at C (could * also use Fortran order if the array is Fortran order). * In this case the subspace iterator is not buffered. * @@ -2773,7 +2862,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, NPY_ITER_GROWINNER; /* - * For a single 1-d operand, guarantee itertion order + * For a single 1-d operand, guarantee iteration order * (scipy used this). Note that subspace may be used. */ if ((mit->numiter == 1) && (PyArray_NDIM(index_arrays[0]) == 1)) { @@ -2985,7 +3074,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, fail: /* - * Check whether the operand was not broadcastable and replace the error + * Check whether the operand could not be broadcast and replace the error * in that case. This should however normally be found early with a * direct goto to broadcast_error */ @@ -3000,7 +3089,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, /* (j < 0 is currently impossible, extra_op is reshaped) */ j >= 0 && PyArray_DIM(extra_op, i) != mit->dimensions[j]) { - /* extra_op cannot be broadcasted to the indexing result */ + /* extra_op cannot be broadcast to the indexing result */ goto broadcast_error; } } @@ -3060,7 +3149,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, * that most of this public API is currently not guaranteed * to stay the same between versions. If you plan on using * it, please consider adding more utility functions here - * to accomodate new features. + * to accommodate new features. 
*/ NPY_NO_EXPORT PyObject * PyArray_MapIterArray(PyArrayObject * a, PyObject * index) diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 5fab174ba..851a37456 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -9,9 +9,8 @@ #include "numpy/arrayscalars.h" #include "npy_config.h" - #include "npy_pycompat.h" - +#include "ufunc_override.h" #include "common.h" #include "ctors.h" #include "calculation.h" @@ -1670,6 +1669,13 @@ array_setstate(PyArrayObject *self, PyObject *args) tmp = PyUnicode_AsLatin1String(rawdata); Py_DECREF(rawdata); rawdata = tmp; + if (tmp == NULL) { + /* More informative error message */ + PyErr_SetString(PyExc_ValueError, + ("Failed to encode latin1 string when unpickling a Numpy array. " + "pickle.load(a, encoding='latin1') is assumed.")); + return NULL; + } } #endif @@ -1680,7 +1686,7 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } - if (PyBytes_AsStringAndSize(rawdata, &datastr, &len)) { + if (PyBytes_AsStringAndSize(rawdata, &datastr, &len) < 0) { Py_DECREF(rawdata); return NULL; } @@ -1894,6 +1900,19 @@ array_dumps(PyArrayObject *self, PyObject *args) static PyObject * +array_sizeof(PyArrayObject *self) +{ + /* object + dimension and strides */ + Py_ssize_t nbytes = NPY_SIZEOF_PYARRAYOBJECT + + PyArray_NDIM(self) * sizeof(npy_intp) * 2; + if (PyArray_CHKFLAGS(self, NPY_ARRAY_OWNDATA)) { + nbytes += PyArray_NBYTES(self); + } + return PyLong_FromSsize_t(nbytes); +} + + +static PyObject * array_transpose(PyArrayObject *self, PyObject *args) { PyObject *shape = Py_None; @@ -1991,31 +2010,52 @@ array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *fname, *ret, *b, *out = NULL; - static PyObject *numpycore = NULL; - char * kwords[] = {"b", "out", NULL }; + static PyUFuncObject *cached_npy_dot = NULL; + int errval; + PyObject *override = NULL; + PyObject *a = (PyObject *)self, *b, *o = Py_None; + PyObject *newargs; + PyArrayObject *ret; + char* kwlist[] = {"b", "out", NULL }; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwords, &b, &out)) { + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist, &b, &o)) { return NULL; } - /* Since blas-dot is exposed only on the Python side, we need to grab it - * from there */ - if (numpycore == NULL) { - numpycore = PyImport_ImportModule("numpy.core"); - if (numpycore == NULL) { - return NULL; - } + if (cached_npy_dot == NULL) { + PyObject *module = PyImport_ImportModule("numpy.core.multiarray"); + cached_npy_dot = (PyUFuncObject*)PyDict_GetItemString( + PyModule_GetDict(module), "dot"); + + Py_INCREF(cached_npy_dot); + Py_DECREF(module); } - fname = PyUString_FromString("dot"); - if (out == NULL) { - ret = PyObject_CallMethodObjArgs(numpycore, fname, self, b, NULL); + + if ((newargs = PyTuple_Pack(3, a, b, o)) == NULL) { + return NULL; } - else { - ret = PyObject_CallMethodObjArgs(numpycore, fname, self, b, out, NULL); + errval = PyUFunc_CheckOverride(cached_npy_dot, "__call__", + newargs, NULL, &override, 2); + Py_DECREF(newargs); + + if (errval) { + return NULL; } - Py_DECREF(fname); - return ret; + else if (override) { + return override; + } + + if (o == Py_None) { + o = NULL; + } + if (o != NULL && !PyArray_Check(o)) { + PyErr_SetString(PyExc_TypeError, + "'out' must be an array"); + return NULL; + } + ret = (PyArrayObject *)PyArray_MatrixProduct2(a, b, (PyArrayObject *)o); + return PyArray_Return(ret); } 
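The rewritten `array_dot` above replaces a per-call import of `numpy.core` with a callable that is resolved once and cached in a function-level static. Below is a minimal sketch of that lazy-caching idiom, assuming only the CPython C API; the helper name `get_cached_dot` is hypothetical, and it uses `PyObject_GetAttrString` where the diff reads the module dict directly.

```c
#include <Python.h>

/*
 * Lazily resolve a Python callable and keep an owned reference in a
 * static, so the import and lookup cost is paid only on the first call.
 */
static PyObject *
get_cached_dot(void)
{
    static PyObject *cached_dot = NULL;  /* owned reference, kept for process lifetime */

    if (cached_dot == NULL) {
        PyObject *module = PyImport_ImportModule("numpy.core.multiarray");
        if (module == NULL) {
            return NULL;  /* import error is already set */
        }
        cached_dot = PyObject_GetAttrString(module, "dot");
        Py_DECREF(module);
    }
    return cached_dot;  /* borrowed by the caller; NULL on failure */
}
```

The static holds its reference forever by design, which is the usual trade-off for module-level callables that are looked up on every binary operation.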
@@ -2301,6 +2341,11 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction)array_wraparray, METH_VARARGS, NULL}, + /* for the sys module */ + {"__sizeof__", + (PyCFunction) array_sizeof, + METH_NOARGS, NULL}, + /* for the copy module */ {"__copy__", (PyCFunction)array_copy_keeporder, diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src index 9d4aabe31..77e699562 100644 --- a/numpy/core/src/multiarray/multiarray_tests.c.src +++ b/numpy/core/src/multiarray/multiarray_tests.c.src @@ -572,6 +572,42 @@ fail: return NULL; } +/* check no elison for avoided increfs */ +static PyObject * +incref_elide(PyObject *dummy, PyObject *args) +{ + PyObject *arg = NULL, *res, *tup; + if (!PyArg_ParseTuple(args, "O", &arg)) { + return NULL; + } + + /* refcount 1 array but should not be elided */ + arg = PyArray_NewCopy((PyArrayObject*)arg, NPY_KEEPORDER); + res = PyNumber_Add(arg, arg); + + /* return original copy, should be equal to input */ + tup = PyTuple_Pack(2, arg, res); + Py_DECREF(arg); + Py_DECREF(res); + return tup; +} + +/* check no elison for get from list without incref */ +static PyObject * +incref_elide_l(PyObject *dummy, PyObject *args) +{ + PyObject *arg = NULL, *r, *res; + if (!PyArg_ParseTuple(args, "O", &arg)) { + return NULL; + } + /* get item without increasing refcount, item may still be on the python + * stack but above the inaccessible top */ + r = PyList_GetItem(arg, 4); + res = PyNumber_Add(r, r); + + return res; +} + #if !defined(NPY_PY3K) static PyObject * @@ -858,6 +894,12 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"test_inplace_increment", inplace_increment, METH_VARARGS, NULL}, + {"incref_elide", + incref_elide, + METH_VARARGS, NULL}, + {"incref_elide_l", + incref_elide_l, + METH_VARARGS, NULL}, #if !defined(NPY_PY3K) {"test_int_subclass", int_subclass, diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 682705a1b..70590662e 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -56,6 +56,10 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "common.h" #include "ufunc_override.h" #include "scalarmathmodule.h" /* for npy_mul_with_overflow_intp */ +#include "multiarraymodule.h" +#include "cblasfuncs.h" +#include "vdot.h" +#include "templ_common.h" /* Only here for API compatibility */ NPY_NO_EXPORT PyTypeObject PyBigArray_Type; @@ -69,8 +73,12 @@ PyArray_GetPriority(PyObject *obj, double default_) PyObject *ret; double priority = NPY_PRIORITY; - if (PyArray_CheckExact(obj)) + if (PyArray_CheckExact(obj)) { return priority; + } + else if (PyArray_CheckAnyScalarExact(obj)) { + return NPY_SCALAR_PRIORITY; + } ret = PyArray_GetAttrString_SuppressException(obj, "__array_priority__"); if (ret == NULL) { @@ -246,7 +254,7 @@ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) { npy_intp newd1; PyArray_Descr *descr; - char msg[] = "PyArray_As1D: use PyArray_AsCArray."; + static const char msg[] = "PyArray_As1D: use PyArray_AsCArray."; if (DEPRECATE(msg) < 0) { return -1; @@ -267,7 +275,7 @@ PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) { npy_intp newdims[2]; PyArray_Descr *descr; - char msg[] = "PyArray_As1D: use PyArray_AsCArray."; + static const char msg[] = "PyArray_As1D: use PyArray_AsCArray."; if (DEPRECATE(msg) < 0) { return -1; @@ -339,8 +347,9 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis) } if (ndim == 1 && axis != 0) { - 
char msg[] = "axis != 0 for ndim == 1; this will raise an error in " - "future versions of numpy"; + static const char msg[] = "axis != 0 for ndim == 1; " + "this will raise an error in " + "future versions of numpy"; if (DEPRECATE(msg) < 0) { return NULL; } @@ -576,6 +585,12 @@ PyArray_Concatenate(PyObject *op, int axis) PyArrayObject **arrays; PyArrayObject *ret; + if (!PySequence_Check(op)) { + PyErr_SetString(PyExc_TypeError, + "The first input argument needs to be a sequence"); + return NULL; + } + /* Convert the input list into arrays */ narrays = PySequence_Size(op); if (narrays < 0) { @@ -818,6 +833,9 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) typenum = PyArray_ObjectType(op2, typenum); typec = PyArray_DescrFromType(typenum); + if (typec == NULL) { + return NULL; + } Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); @@ -828,8 +846,18 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { - goto fail; + Py_DECREF(ap1); + return NULL; } + +#if defined(HAVE_CBLAS) + if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 && + (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum || + NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) { + return cblas_innerproduct(typenum, ap1, ap2); + } +#endif + if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) { ret = (PyArray_NDIM(ap1) == 0 ? ap1 : ap2); ret = (PyArrayObject *)Py_TYPE(ret)->tp_as_number->nb_multiply( @@ -841,7 +869,8 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) l = PyArray_DIMS(ap1)[PyArray_NDIM(ap1) - 1]; if (PyArray_DIMS(ap2)[PyArray_NDIM(ap2) - 1] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); + dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1, + ap2, PyArray_NDIM(ap2) - 1); goto fail; } @@ -875,7 +904,8 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) } is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1) - 1]; is2 = PyArray_STRIDES(ap2)[PyArray_NDIM(ap2) - 1]; - op = PyArray_DATA(ret); os = PyArray_DESCR(ret)->elsize; + op = PyArray_DATA(ret); + os = PyArray_DESCR(ret)->elsize; axis = PyArray_NDIM(ap1) - 1; it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); axis = PyArray_NDIM(ap2) - 1; @@ -908,7 +938,17 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) } /*NUMPY_API - * Numeric.matrixproduct(a,v,out) + * Numeric.matrixproduct(a,v) + * just like inner product but does the swapaxes stuff on the fly + */ +NPY_NO_EXPORT PyObject * +PyArray_MatrixProduct(PyObject *op1, PyObject *op2) +{ + return PyArray_MatrixProduct2(op1, op2, NULL); +} + +/*NUMPY_API + * Numeric.matrixproduct2(a,v,out) * just like inner product but does the swapaxes stuff on the fly */ NPY_NO_EXPORT PyObject * @@ -943,8 +983,18 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { - goto fail; + Py_DECREF(ap1); + return NULL; + } + +#if defined(HAVE_CBLAS) + if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 && + (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum || + NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) { + return cblas_matrixproduct(typenum, ap1, ap2, out); } +#endif + if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) { ret = (PyArray_NDIM(ap1) == 0 ? 
ap1 : ap2); ret = (PyArrayObject *)Py_TYPE(ret)->tp_as_number->nb_multiply( @@ -961,7 +1011,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) matchDim = 0; } if (PyArray_DIMS(ap2)[matchDim] != l) { - PyErr_SetString(PyExc_ValueError, "objects are not aligned"); + dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1, ap2, matchDim); goto fail; } nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2; @@ -979,14 +1029,9 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) if(PyArray_NDIM(ap2) > 1) { dimensions[j++] = PyArray_DIMS(ap2)[PyArray_NDIM(ap2)-1]; } - /* - fprintf(stderr, "nd=%d dimensions=", nd); - for(i=0; i<j; i++) - fprintf(stderr, "%d ", dimensions[i]); - fprintf(stderr, "\n"); - */ - is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1)-1]; is2 = PyArray_STRIDES(ap2)[matchDim]; + is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1)-1]; + is2 = PyArray_STRIDES(ap2)[matchDim]; /* Choose which subtype to return */ ret = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum); if (ret == NULL) { @@ -1045,15 +1090,6 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) return NULL; } -/*NUMPY_API - *Numeric.matrixproduct(a,v) - * just like inner product but does the swapaxes stuff on the fly - */ -NPY_NO_EXPORT PyObject * -PyArray_MatrixProduct(PyObject *op1, PyObject *op2) -{ - return PyArray_MatrixProduct2(op1, op2, NULL); -} /*NUMPY_API * Copy and Transpose @@ -1187,10 +1223,18 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, ip2 -= is2; op += os; } - for (i = 0; i < (n1 - n2 + 1); i++) { - dot(ip1, is1, ip2, is2, op, n, ret); - ip1 += is1; - op += os; + if (small_correlate(ip1, is1, n1 - n2 + 1, PyArray_TYPE(ap1), + ip2, is2, n, PyArray_TYPE(ap2), + op, os)) { + ip1 += is1 * (n1 - n2 + 1); + op += os * (n1 - n2 + 1); + } + else { + for (i = 0; i < (n1 - n2 + 1); i++) { + dot(ip1, is1, ip2, is2, op, n, ret); + ip1 += is1; + op += os; + } } for (i = 0; i < n_right; i++) { n--; @@ -1615,6 +1659,73 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) "only 2 non-keyword arguments accepted"); return NULL; } + + /* super-fast path for ndarray argument calls */ + if (PyTuple_GET_SIZE(args) == 0) { + goto full_path; + } + op = PyTuple_GET_ITEM(args, 0); + if (PyArray_CheckExact(op)) { + PyObject * dtype_obj = Py_None; + oparr = (PyArrayObject *)op; + /* get dtype which can be positional */ + if (PyTuple_GET_SIZE(args) == 2) { + dtype_obj = PyTuple_GET_ITEM(args, 1); + } + else if (kws) { + dtype_obj = PyDict_GetItem(kws, npy_ma_str_dtype); + if (dtype_obj == NULL) { + dtype_obj = Py_None; + } + } + if (dtype_obj != Py_None) { + goto full_path; + } + + /* array(ndarray) */ + if (kws == NULL) { + ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); + goto finish; + } + else { + /* fast path for copy=False rest default (np.asarray) */ + PyObject * copy_obj, * order_obj, *ndmin_obj; + copy_obj = PyDict_GetItem(kws, npy_ma_str_copy); + if (copy_obj != Py_False) { + goto full_path; + } + copy = NPY_FALSE; + + /* order does not matter for contiguous 1d arrays */ + if (PyArray_NDIM((PyArrayObject*)op) > 1 || + !PyArray_IS_C_CONTIGUOUS((PyArrayObject*)op)) { + order_obj = PyDict_GetItem(kws, npy_ma_str_order); + if (order_obj != Py_None && order_obj != NULL) { + goto full_path; + } + } + + ndmin_obj = PyDict_GetItem(kws, npy_ma_str_ndmin); + if (ndmin_obj) { + ndmin = PyLong_AsLong(ndmin_obj); + if (ndmin == -1 && PyErr_Occurred()) { + goto clean_type; + } + else if (ndmin > NPY_MAXDIMS) { + 
goto full_path; + } + } + + /* copy=False with default dtype, order and ndim */ + if (STRIDING_OK(oparr, order)) { + ret = oparr; + Py_INCREF(ret); + goto finish; + } + } + } + +full_path: if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i", kwd, &op, PyArray_DescrConverter2, &type, @@ -1839,7 +1950,7 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) static char *kwlist[] = {"dtype","obj", NULL}; PyArray_Descr *typecode; - PyObject *obj = NULL; + PyObject *obj = NULL, *tmpobj = NULL; int alloc = 0; void *dptr; PyObject *ret; @@ -1871,14 +1982,32 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) alloc = 1; } else { +#if defined(NPY_PY3K) + /* Backward compatibility with Python 2 Numpy pickles */ + if (PyUnicode_Check(obj)) { + tmpobj = PyUnicode_AsLatin1String(obj); + obj = tmpobj; + if (tmpobj == NULL) { + /* More informative error message */ + PyErr_SetString(PyExc_ValueError, + "Failed to encode Numpy scalar data string to " + "latin1,\npickle.load(a, encoding='latin1') is " + "assumed if unpickling."); + return NULL; + } + } +#endif + if (!PyString_Check(obj)) { PyErr_SetString(PyExc_TypeError, "initializing object must be a string"); + Py_XDECREF(tmpobj); return NULL; } if (PyString_GET_SIZE(obj) < typecode->elsize) { PyErr_SetString(PyExc_ValueError, "initialization string is too small"); + Py_XDECREF(tmpobj); return NULL; } dptr = PyString_AS_STRING(obj); @@ -1890,6 +2019,7 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) if (alloc) { PyArray_free(dptr); } + Py_XDECREF(tmpobj); return ret; } @@ -2107,15 +2237,15 @@ array_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) static PyObject * array_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwds) { - int errval; static PyUFuncObject *cached_npy_dot = NULL; + int errval; PyObject *override = NULL; PyObject *v, *a, *o = NULL; + PyArrayObject *ret; char* kwlist[] = {"a", "b", "out", NULL }; - PyObject *module; if (cached_npy_dot == NULL) { - module = PyImport_ImportModule("numpy.core.multiarray"); + PyObject *module = PyImport_ImportModule("numpy.core.multiarray"); cached_npy_dot = (PyUFuncObject*)PyDict_GetItemString( PyModule_GetDict(module), "dot"); @@ -2143,9 +2273,120 @@ array_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwds) "'out' must be an array"); return NULL; } - return PyArray_Return((PyArrayObject *)PyArray_MatrixProduct2(a, v, (PyArrayObject *)o)); + ret = (PyArrayObject *)PyArray_MatrixProduct2(a, v, (PyArrayObject *)o); + return PyArray_Return(ret); +} + + +static PyObject * +array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + int typenum; + char *ip1, *ip2, *op; + npy_intp n, stride; + PyObject *op1, *op2; + PyArrayObject *ap1 = NULL, *ap2 = NULL, *ret = NULL; + PyArray_Descr *type; + PyArray_DotFunc *vdot; + NPY_BEGIN_THREADS_DEF; + + if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) { + return NULL; + } + + /* + * Conjugating dot product using the BLAS for vectors. + * Flattens both op1 and op2 before dotting. 
+ */ + typenum = PyArray_ObjectType(op1, 0); + typenum = PyArray_ObjectType(op2, typenum); + + type = PyArray_DescrFromType(typenum); + Py_INCREF(type); + ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0, NULL); + if (ap1 == NULL) { + Py_DECREF(type); + goto fail; + } + op1 = PyArray_Ravel(ap1, NPY_CORDER); + if (op1 == NULL) { + Py_DECREF(type); + goto fail; + } + Py_DECREF(ap1); + ap1 = (PyArrayObject *)op1; + + ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0, NULL); + if (ap2 == NULL) { + goto fail; + } + op2 = PyArray_Ravel(ap2, NPY_CORDER); + if (op2 == NULL) { + goto fail; + } + Py_DECREF(ap2); + ap2 = (PyArrayObject *)op2; + + if (PyArray_DIM(ap2, 0) != PyArray_DIM(ap1, 0)) { + PyErr_SetString(PyExc_ValueError, + "vectors have different lengths"); + goto fail; + } + + /* array scalar output */ + ret = new_array_for_sum(ap1, ap2, NULL, 0, (npy_intp *)NULL, typenum); + if (ret == NULL) { + goto fail; + } + + n = PyArray_DIM(ap1, 0); + stride = type->elsize; + ip1 = PyArray_DATA(ap1); + ip2 = PyArray_DATA(ap2); + op = PyArray_DATA(ret); + + switch (typenum) { + case NPY_CFLOAT: + vdot = (PyArray_DotFunc *)CFLOAT_vdot; + break; + case NPY_CDOUBLE: + vdot = (PyArray_DotFunc *)CDOUBLE_vdot; + break; + case NPY_CLONGDOUBLE: + vdot = (PyArray_DotFunc *)CLONGDOUBLE_vdot; + break; + case NPY_OBJECT: + vdot = (PyArray_DotFunc *)OBJECT_vdot; + break; + default: + vdot = type->f->dotfunc; + if (vdot == NULL) { + PyErr_SetString(PyExc_ValueError, + "function not available for this data type"); + goto fail; + } + } + + if (n < 500) { + vdot(ip1, stride, ip2, stride, op, n, NULL); + } + else { + NPY_BEGIN_THREADS_DESCR(type); + vdot(ip1, stride, ip2, stride, op, n, NULL); + NPY_END_THREADS_DESCR(type); + } + + Py_XDECREF(ap1); + Py_XDECREF(ap2); + return PyArray_Return(ret); +fail: + Py_XDECREF(ap1); + Py_XDECREF(ap2); + Py_XDECREF(ret); + return NULL; } + static int einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, PyArrayObject **op) @@ -2763,7 +3004,7 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) NULL, arr, ax, ay }; npy_uint32 op_flags[4] = { - NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE, + NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE | NPY_ITER_NO_SUBTYPE, NPY_ITER_READONLY, NPY_ITER_READONLY, NPY_ITER_READONLY }; PyArray_Descr * common_dt = PyArray_ResultType(2, &op_in[0] + 2, @@ -3713,6 +3954,9 @@ static struct PyMethodDef array_module_methods[] = { {"dot", (PyCFunction)array_matrixproduct, METH_VARARGS | METH_KEYWORDS, NULL}, + {"vdot", + (PyCFunction)array_vdot, + METH_VARARGS | METH_KEYWORDS, NULL}, {"einsum", (PyCFunction)array_einsum, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4004,6 +4248,38 @@ set_flaginfo(PyObject *d) return; } +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_prepare = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_buffer = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ndmin = NULL; + +static int +intern_strings(void) +{ + npy_ma_str_array = PyUString_InternFromString("__array__"); + npy_ma_str_array_prepare = PyUString_InternFromString("__array_prepare__"); + npy_ma_str_array_wrap = 
PyUString_InternFromString("__array_wrap__"); + npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__"); + npy_ma_str_buffer = PyUString_InternFromString("__buffer__"); + npy_ma_str_ufunc = PyUString_InternFromString("__numpy_ufunc__"); + npy_ma_str_order = PyUString_InternFromString("order"); + npy_ma_str_copy = PyUString_InternFromString("copy"); + npy_ma_str_dtype = PyUString_InternFromString("dtype"); + npy_ma_str_ndmin = PyUString_InternFromString("ndmin"); + + return npy_ma_str_array && npy_ma_str_array_prepare && + npy_ma_str_array_wrap && npy_ma_str_array_finalize && + npy_ma_str_array_finalize && npy_ma_str_ufunc && + npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype && + npy_ma_str_ndmin; +} + #if defined(NPY_PY3K) static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, @@ -4176,6 +4452,10 @@ PyMODINIT_FUNC initmultiarray(void) { set_flaginfo(d); + if (!intern_strings()) { + goto err; + } + if (set_typeinfo(d) != 0) { goto err; } diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h index 5a3b14b0b..82ae24845 100644 --- a/numpy/core/src/multiarray/multiarraymodule.h +++ b/numpy/core/src/multiarray/multiarraymodule.h @@ -1,4 +1,15 @@ #ifndef _NPY_MULTIARRAY_H_ #define _NPY_MULTIARRAY_H_ +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_prepare; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_buffer; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ndmin; + #endif diff --git a/numpy/core/src/multiarray/multiarraymodule_onefile.c b/numpy/core/src/multiarray/multiarraymodule_onefile.c index 2d05c20ef..04fef61ce 100644 --- a/numpy/core/src/multiarray/multiarraymodule_onefile.c +++ b/numpy/core/src/multiarray/multiarraymodule_onefile.c @@ -2,7 +2,7 @@ * This file includes all the .c files needed for a complete multiarray module. 
* This is used in the case where separate compilation is not enabled * - * Note that the order of the includs matters + * Note that the order of the includes matters */ #include "common.c" @@ -15,6 +15,7 @@ #include "datetime_busday.c" #include "datetime_busdaycal.c" #include "arraytypes.c" +#include "vdot.c" #include "hashdescr.c" #include "numpyos.c" @@ -50,9 +51,10 @@ #include "array_assign_scalar.c" #include "array_assign_array.c" #include "ucsnarrow.c" - #include "arrayobject.c" - #include "numpymemoryview.c" - #include "multiarraymodule.c" + +#if defined(HAVE_CBLAS) +#include "cblasfuncs.c" +#endif diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 7e40377b2..77c45434f 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -129,7 +129,7 @@ NpyIter_GlobalFlagsConverter(PyObject *flags_in, npy_uint32 *flags) f = f_str; } - if (PyBytes_AsStringAndSize(f, &str, &length) == -1) { + if (PyBytes_AsStringAndSize(f, &str, &length) < 0) { Py_DECREF(f); return 0; } @@ -238,7 +238,7 @@ npyiter_order_converter(PyObject *order_in, NPY_ORDER *order) return ret; } - if (PyBytes_AsStringAndSize(order_in, &str, &length) == -1) { + if (PyBytes_AsStringAndSize(order_in, &str, &length) < 0) { return 0; } @@ -300,7 +300,8 @@ NpyIter_OpFlagsConverter(PyObject *op_flags_in, f = f_str; } - if (PyBytes_AsStringAndSize(f, &str, &length) == -1) { + if (PyBytes_AsStringAndSize(f, &str, &length) < 0) { + PyErr_Clear(); Py_DECREF(f); PyErr_SetString(PyExc_ValueError, "op_flags must be a tuple or array of per-op flag-tuples"); diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index a26a93c1d..9c6e43c69 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -54,6 +54,7 @@ PyArray_SetNumericOps(PyObject *dict) SET(reciprocal); SET(_ones_like); SET(sqrt); + SET(cbrt); SET(negative); SET(absolute); SET(invert); @@ -89,7 +90,8 @@ PyArray_SetNumericOps(PyObject *dict) static int has_ufunc_attr(PyObject * obj) { /* attribute check is expensive for scalar operations, avoid if possible */ - if (PyArray_CheckExact(obj) || _is_basic_python_type(obj)) { + if (PyArray_CheckExact(obj) || PyArray_CheckAnyScalarExact(obj) || + _is_basic_python_type(obj)) { return 0; } else { diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h index 4667b6b99..43f04d1c6 100644 --- a/numpy/core/src/multiarray/number.h +++ b/numpy/core/src/multiarray/number.h @@ -12,6 +12,7 @@ typedef struct { PyObject *reciprocal; PyObject *_ones_like; PyObject *sqrt; + PyObject *cbrt; PyObject *negative; PyObject *absolute; PyObject *invert; diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/multiarray/numpyos.c index 44b32f4da..dddead7ea 100644 --- a/numpy/core/src/multiarray/numpyos.c +++ b/numpy/core/src/multiarray/numpyos.c @@ -507,7 +507,7 @@ NumPyOS_ascii_strtod(const char *s, char** endptr) /* * ## 2 * - * At least Python versions <= 2.5.2 and <= 2.6.1 + * At least Python versions <= 2.6.1 * * Fails to do best-efforts parsing of strings of the form "1<DP>234" * where <DP> is the decimal point under the foreign locale. 
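Several hunks in this changeset (datetime.c, methods.c, and nditer_pywrap.c just above) change error checks on `PyBytes_AsStringAndSize` from `== -1` to `< 0`. A small self-contained sketch of the convention; the wrapper name is illustrative only.

```c
#include <Python.h>

/*
 * CPython calls that report failure through a negative return value
 * are tested with `< 0` rather than `== -1`.
 */
static int
get_bytes_buffer(PyObject *bytes, char **str, Py_ssize_t *len)
{
    if (PyBytes_AsStringAndSize(bytes, str, len) < 0) {
        return -1;  /* exception already set by CPython */
    }
    return 0;
}
```

Testing `< 0` matches the documented contract (failure is signalled by a negative return) and remains correct should any other negative code ever be returned.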
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c index d71823566..71a82d7a0 100644 --- a/numpy/core/src/multiarray/scalarapi.c +++ b/numpy/core/src/multiarray/scalarapi.c @@ -161,6 +161,15 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr) } /*NUMPY_API + * return true an object is exactly a numpy scalar + */ +NPY_NO_EXPORT int +PyArray_CheckAnyScalarExact(PyObject * obj) +{ + return is_anyscalar_exact(obj); +} + +/*NUMPY_API * Convert to c-type * * no error checking is performed -- ctypeptr must be same type as scalar diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 110bef248..4099326f1 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -25,6 +25,8 @@ #include "_datetime.h" #include "datetime_strings.h" +#include <stdlib.h> + NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[] = { {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, @@ -1078,6 +1080,24 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) { PyObject *arr, *ret; + /* + * If the other object is None, False is always right. This avoids + * the array None comparison, at least until deprecation it is fixed. + * After that, this may be removed and numpy false would be returned. + * + * NOTE: np.equal(NaT, None) evaluates to TRUE! This is an + * an inconsistency, which may has to be considered + * when the deprecation is finished. + */ + if (other == Py_None) { + if (cmp_op == Py_EQ) { + Py_RETURN_FALSE; + } + if (cmp_op == Py_NE) { + Py_RETURN_TRUE; + } + } + arr = PyArray_FromScalar(self, NULL); if (arr == NULL) { return NULL; @@ -1175,6 +1195,20 @@ gentype_size_get(PyObject *NPY_UNUSED(self)) return PyInt_FromLong(1); } +static PyObject * +gentype_sizeof(PyObject *self) +{ + Py_ssize_t nbytes; + PyObject * isz = gentype_itemsize_get(self); + if (isz == NULL) { + return NULL; + } + nbytes = PyLong_AsLong(isz) + Py_TYPE(self)->tp_basicsize + + Py_SIZE(self) * Py_TYPE(self)->tp_itemsize; + Py_DECREF(isz); + return PyLong_FromSsize_t(nbytes); +} + #if PY_VERSION_HEX >= 0x03000000 NPY_NO_EXPORT void gentype_struct_free(PyObject *ptr) @@ -1903,6 +1937,11 @@ static PyMethodDef gentype_methods[] = { (PyCFunction)gentype_wraparray, METH_VARARGS, doc_sc_wraparray}, + /* for the sys module */ + {"__sizeof__", + (PyCFunction)gentype_sizeof, + METH_NOARGS, NULL}, + /* for the copy module */ {"__copy__", (PyCFunction)gentype_copy, @@ -2894,7 +2933,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) if (PyErr_Occurred() || (memu > NPY_MAX_INT)) { PyErr_Clear(); PyErr_Format(PyExc_OverflowError, - "size must be smaller than %d", + "size cannot be greater than %d", (int) NPY_MAX_INT); return NULL; } @@ -4012,11 +4051,13 @@ initialize_casting_tables(void) static PyNumberMethods longdoubletype_as_number; static PyNumberMethods clongdoubletype_as_number; +static void init_basetypes(void); NPY_NO_EXPORT void initialize_numeric_types(void) { + init_basetypes(); PyGenericArrType_Type.tp_dealloc = (destructor)gentype_dealloc; PyGenericArrType_Type.tp_as_number = &gentype_as_number; PyGenericArrType_Type.tp_as_buffer = &gentype_as_buffer; @@ -4192,48 +4233,95 @@ initialize_numeric_types(void) PyArrayMapIter_Type.tp_iter = PyObject_SelfIter; } - -/* the order of this table is important */ -static PyTypeObject *typeobjects[] = { - &PyBoolArrType_Type, - &PyByteArrType_Type, - &PyUByteArrType_Type, 
- &PyShortArrType_Type, - &PyUShortArrType_Type, - &PyIntArrType_Type, - &PyUIntArrType_Type, - &PyLongArrType_Type, - &PyULongArrType_Type, - &PyLongLongArrType_Type, - &PyULongLongArrType_Type, - &PyFloatArrType_Type, - &PyDoubleArrType_Type, - &PyLongDoubleArrType_Type, - &PyCFloatArrType_Type, - &PyCDoubleArrType_Type, - &PyCLongDoubleArrType_Type, - &PyObjectArrType_Type, - &PyStringArrType_Type, - &PyUnicodeArrType_Type, - &PyVoidArrType_Type, - &PyDatetimeArrType_Type, - &PyTimedeltaArrType_Type, - &PyHalfArrType_Type +typedef struct { + PyTypeObject * type; + int typenum; +} scalar_type; + +static scalar_type typeobjects[] = { + {&PyBoolArrType_Type, NPY_BOOL}, + {&PyByteArrType_Type, NPY_BYTE}, + {&PyUByteArrType_Type, NPY_UBYTE}, + {&PyShortArrType_Type, NPY_SHORT}, + {&PyUShortArrType_Type, NPY_USHORT}, + {&PyIntArrType_Type, NPY_INT}, + {&PyUIntArrType_Type, NPY_UINT}, + {&PyLongArrType_Type, NPY_LONG}, + {&PyULongArrType_Type, NPY_ULONG}, + {&PyLongLongArrType_Type, NPY_LONGLONG}, + {&PyULongLongArrType_Type, NPY_ULONGLONG}, + {&PyFloatArrType_Type, NPY_FLOAT}, + {&PyDoubleArrType_Type, NPY_DOUBLE}, + {&PyLongDoubleArrType_Type, NPY_LONGDOUBLE}, + {&PyCFloatArrType_Type, NPY_CFLOAT}, + {&PyCDoubleArrType_Type, NPY_CDOUBLE}, + {&PyCLongDoubleArrType_Type, NPY_CLONGDOUBLE}, + {&PyObjectArrType_Type, NPY_OBJECT}, + {&PyStringArrType_Type, NPY_STRING}, + {&PyUnicodeArrType_Type, NPY_UNICODE}, + {&PyVoidArrType_Type, NPY_VOID}, + {&PyDatetimeArrType_Type, NPY_DATETIME}, + {&PyTimedeltaArrType_Type, NPY_TIMEDELTA}, + {&PyHalfArrType_Type, NPY_HALF} }; +static int compare_types(const void * a_, const void * b_) +{ + const PyTypeObject * a = ((const scalar_type *)a_)->type; + const PyTypeObject * b = ((const scalar_type *)b_)->type; + if (a < b) { + return -1; + } + else if (a > b) { + return 1; + } + return 0; +} + +static void init_basetypes(void) +{ + qsort(typeobjects, sizeof(typeobjects) / sizeof(typeobjects[0]), + sizeof(typeobjects[0]), + compare_types); +} + + +NPY_NO_EXPORT int +get_typeobj_idx(PyTypeObject * obj) +{ + npy_intp imin = 0, imax = sizeof(typeobjects) / sizeof(typeobjects[0]) - 1; + while (imax >= imin) + { + npy_intp imid = ((imax - imin) / 2) + imin; + if(typeobjects[imid].type == obj) { + return imid; + } + else if (typeobjects[imid].type < obj) { + imin = imid + 1; + } + else { + imax = imid - 1; + } + } + + return -1; +} + +NPY_NO_EXPORT int +is_anyscalar_exact(PyObject *obj) +{ + return get_typeobj_idx(Py_TYPE(obj)) >= 0; +} + NPY_NO_EXPORT int _typenum_fromtypeobj(PyObject *type, int user) { int typenum, i; typenum = NPY_NOTYPE; - i = 0; - while(i < NPY_NTYPES) { - if (type == (PyObject *)typeobjects[i]) { - typenum = i; - break; - } - i++; + i = get_typeobj_idx((PyTypeObject*)type); + if (i >= 0) { + typenum = typeobjects[i].typenum; } if (!user) { diff --git a/numpy/core/src/multiarray/scalartypes.h b/numpy/core/src/multiarray/scalartypes.h index 29fbb9e5e..c9b80f9b3 100644 --- a/numpy/core/src/multiarray/scalartypes.h +++ b/numpy/core/src/multiarray/scalartypes.h @@ -44,6 +44,9 @@ gentype_struct_free(void *ptr, void *arg); #endif NPY_NO_EXPORT int +is_anyscalar_exact(PyObject *obj); + +NPY_NO_EXPORT int _typenum_fromtypeobj(PyObject *type, int user); NPY_NO_EXPORT void * diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 2278b5d5b..df1874594 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c @@ -613,7 +613,7 @@ PyArray_SqueezeSelected(PyArrayObject *self, npy_bool *axis_flags) 
else { PyErr_SetString(PyExc_ValueError, "cannot select an axis to squeeze out " - "which has size greater than one"); + "which has size not equal to one"); return NULL; } } @@ -780,7 +780,8 @@ PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute) PyArray_DIMS(ret)[i] = PyArray_DIMS(ap)[permutation[i]]; PyArray_STRIDES(ret)[i] = PyArray_STRIDES(ap)[permutation[i]]; } - PyArray_UpdateFlags(ret, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS); + PyArray_UpdateFlags(ret, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS | + NPY_ARRAY_ALIGNED); return (PyObject *)ret; } @@ -947,10 +948,8 @@ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) newdim.ptr = val; - if (order == NPY_ANYORDER) { - order = PyArray_ISFORTRAN(arr) ? NPY_FORTRANORDER : NPY_CORDER; - } - else if (order == NPY_KEEPORDER) { + if (order == NPY_KEEPORDER) { + /* This handles some corner cases, such as 0-d arrays as well */ if (PyArray_IS_C_CONTIGUOUS(arr)) { order = NPY_CORDER; } @@ -959,41 +958,54 @@ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) } } - if (order == NPY_CORDER && PyArray_IS_C_CONTIGUOUS(arr)) { - return PyArray_Newshape(arr, &newdim, NPY_CORDER); - } - else if (order == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(arr)) { - return PyArray_Newshape(arr, &newdim, NPY_FORTRANORDER); + if (order != NPY_KEEPORDER) { + return PyArray_Newshape(arr, &newdim, order); } /* For KEEPORDER, check if we can make a flattened view */ - else if (order == NPY_KEEPORDER) { + else { npy_stride_sort_item strideperm[NPY_MAXDIMS]; - npy_intp stride; + npy_intp stride, base_stride = NPY_MIN_INTP; int i, ndim = PyArray_NDIM(arr); PyArray_CreateSortedStridePerm(PyArray_NDIM(arr), PyArray_STRIDES(arr), strideperm); - - stride = strideperm[ndim-1].stride; + for (i = ndim-1; i >= 0; --i) { - if (strideperm[i].stride != stride) { + if (PyArray_DIM(arr, strideperm[i].perm) == 1) { + /* A size one dimension does not matter */ + continue; + } + if (base_stride == NPY_MIN_INTP) { + stride = strideperm[i].stride; + base_stride = stride; + } + else if (strideperm[i].stride != stride) { break; } stride *= PyArray_DIM(arr, strideperm[i].perm); } +#if NPY_RELAXED_STRIDES_CHECKING == 0 + /* + * For tidyness, cannot be reached with relaxed strides checking + * since the array is guaranteed contiguous (without, not sure...) 
+ */ + if (base_stride == NPY_MIN_INTP) { + base_stride = PyArray_ITEMSIZE(arr); + } +#endif + /* If all the strides matched a contiguous layout, return a view */ if (i < 0) { PyArrayObject *ret; - stride = strideperm[ndim-1].stride; val[0] = PyArray_SIZE(arr); Py_INCREF(PyArray_DESCR(arr)); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(arr), PyArray_DESCR(arr), 1, val, - &stride, + &base_stride, PyArray_BYTES(arr), PyArray_FLAGS(arr), (PyObject *)arr); diff --git a/numpy/core/src/multiarray/templ_common.h.src b/numpy/core/src/multiarray/templ_common.h.src new file mode 100644 index 000000000..e417526df --- /dev/null +++ b/numpy/core/src/multiarray/templ_common.h.src @@ -0,0 +1,9 @@ +#ifndef __NPY_TYPED_COMMON_INC +#define __NPY_TYPED_COMMON_INC + +/* utility functions that profit from templates */ + +#include "numpy/npy_common.h" + + +#endif diff --git a/numpy/core/src/multiarray/testcalcs.py b/numpy/core/src/multiarray/testcalcs.py deleted file mode 100644 index e8b7b1734..000000000 --- a/numpy/core/src/multiarray/testcalcs.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from scipy import weave - -class YMD(object): - year = 0 - month = 0 - days = 0 - - -month_offset = [ - [ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], - [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ] -] - -days_in_month = [ - [ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ], - [ 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ] -] - -def is_leapyear(year): - return (year % 4 == 0) & ((year % 100 != 0) | (year % 400 == 0)) - - -# Return the year offset, that is the absolute date of the day -# 31.12.(year-1) since 31.12.1969 in the proleptic Gregorian calendar. - -def year_offset(year): - code = """ - year-=1970; - if ((year+1969) >= 0 || -1/4 == -1) - return_val = year*365 + year/4 - year/100 + year/400; - else - return_val = year*365 + (year-3)/4 - (year-99)/100 + (year-399)/400; - """ - return weave.inline(code, ['year']) - - -def days_from_ymd(year, month, day): - - leap = is_leapyear(year) - - # Negative month values indicate months relative to the years end */ - if (month < 0): month += 13 - if not (month >= 1 and month<=12): - raise ValueError("month out of range (1-21): %d" % month) - - # Negative values indicate days relative to the months end */ - if (day < 0): day += days_in_month[leap][month - 1] + 1 - if not (day >= 1 and day <= days_in_month[leap][month-1]): - raise ValueError("day out of range: %d" % day) - - # Number of days between Dec 31, (year - 1) and Dec 31, 1969 - # (can be negative). - # - yearoffset = year_offset(year); - - # Calculate the number of days using yearoffset */ - # Jan 1, 1970 is day 0 and thus Dec. 31, 1969 is day -1 */ - absdate = day-1 + month_offset[leap][month - 1] + yearoffset; - - return absdate; - - -def ymd_from_days(days): - ymd = YMD() - - year = 1970 + days / 365.2425 diff --git a/numpy/core/src/multiarray/vdot.c b/numpy/core/src/multiarray/vdot.c new file mode 100644 index 000000000..4be85672e --- /dev/null +++ b/numpy/core/src/multiarray/vdot.c @@ -0,0 +1,180 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include <Python.h> +#include "common.h" +#include "vdot.h" +#include "npy_cblas.h" + + +/* + * All data is assumed aligned. 
+ */ +NPY_NO_EXPORT void +CFLOAT_vdot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, + char *op, npy_intp n, void *NPY_UNUSED(ignore)) +{ +#if defined(HAVE_CBLAS) + int is1b = blas_stride(is1, sizeof(npy_cfloat)); + int is2b = blas_stride(is2, sizeof(npy_cfloat)); + + if (is1b && is2b) { + double sum[2] = {0., 0.}; /* double for stability */ + + while (n > 0) { + int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK; + float tmp[2]; + + cblas_cdotc_sub(chunk, ip1, is1b, ip2, is2b, tmp); + sum[0] += (double)tmp[0]; + sum[1] += (double)tmp[1]; + /* use char strides here */ + ip1 += chunk * is1; + ip2 += chunk * is2; + n -= chunk; + } + ((float *)op)[0] = (float)sum[0]; + ((float *)op)[1] = (float)sum[1]; + } + else +#endif + { + float sumr = (float)0.0; + float sumi = (float)0.0; + npy_intp i; + + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { + const float ip1r = ((float *)ip1)[0]; + const float ip1i = ((float *)ip1)[1]; + const float ip2r = ((float *)ip2)[0]; + const float ip2i = ((float *)ip2)[1]; + + sumr += ip1r * ip2r + ip1i * ip2i; + sumi += ip1r * ip2i - ip1i * ip2r; + } + ((float *)op)[0] = sumr; + ((float *)op)[1] = sumi; + } +} + + +/* + * All data is assumed aligned. + */ +NPY_NO_EXPORT void +CDOUBLE_vdot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, + char *op, npy_intp n, void *NPY_UNUSED(ignore)) +{ +#if defined(HAVE_CBLAS) + int is1b = blas_stride(is1, sizeof(npy_cdouble)); + int is2b = blas_stride(is2, sizeof(npy_cdouble)); + + if (is1b && is2b) { + double sum[2] = {0., 0.}; /* double for stability */ + + while (n > 0) { + int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK; + double tmp[2]; + + cblas_zdotc_sub(chunk, ip1, is1b, ip2, is2b, tmp); + sum[0] += (double)tmp[0]; + sum[1] += (double)tmp[1]; + /* use char strides here */ + ip1 += chunk * is1; + ip2 += chunk * is2; + n -= chunk; + } + ((double *)op)[0] = (double)sum[0]; + ((double *)op)[1] = (double)sum[1]; + } + else +#endif + { + double sumr = (double)0.0; + double sumi = (double)0.0; + npy_intp i; + + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { + const double ip1r = ((double *)ip1)[0]; + const double ip1i = ((double *)ip1)[1]; + const double ip2r = ((double *)ip2)[0]; + const double ip2i = ((double *)ip2)[1]; + + sumr += ip1r * ip2r + ip1i * ip2i; + sumi += ip1r * ip2i - ip1i * ip2r; + } + ((double *)op)[0] = sumr; + ((double *)op)[1] = sumi; + } +} + + +/* + * All data is assumed aligned. + */ +NPY_NO_EXPORT void +CLONGDOUBLE_vdot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, + char *op, npy_intp n, void *NPY_UNUSED(ignore)) +{ + npy_longdouble tmpr = 0.0L; + npy_longdouble tmpi = 0.0L; + npy_intp i; + + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { + const npy_longdouble ip1r = ((npy_longdouble *)ip1)[0]; + const npy_longdouble ip1i = ((npy_longdouble *)ip1)[1]; + const npy_longdouble ip2r = ((npy_longdouble *)ip2)[0]; + const npy_longdouble ip2i = ((npy_longdouble *)ip2)[1]; + + tmpr += ip1r * ip2r + ip1i * ip2i; + tmpi += ip1r * ip2i - ip1i * ip2r; + } + ((npy_longdouble *)op)[0] = tmpr; + ((npy_longdouble *)op)[1] = tmpi; +} + +/* + * All data is assumed aligned.
+ */ +NPY_NO_EXPORT void +OBJECT_vdot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, + void *NPY_UNUSED(ignore)) +{ + npy_intp i; + PyObject *tmp0, *tmp1, *tmp2, *tmp = NULL; + PyObject **tmp3; + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { + if ((*((PyObject **)ip1) == NULL) || (*((PyObject **)ip2) == NULL)) { + tmp1 = Py_False; + Py_INCREF(Py_False); + } + else { + tmp0 = PyObject_CallMethod(*((PyObject **)ip1), "conjugate", NULL); + if (tmp0 == NULL) { + Py_XDECREF(tmp); + return; + } + tmp1 = PyNumber_Multiply(tmp0, *((PyObject **)ip2)); + Py_DECREF(tmp0); + if (tmp1 == NULL) { + Py_XDECREF(tmp); + return; + } + } + if (i == 0) { + tmp = tmp1; + } + else { + tmp2 = PyNumber_Add(tmp, tmp1); + Py_XDECREF(tmp); + Py_XDECREF(tmp1); + if (tmp2 == NULL) { + return; + } + tmp = tmp2; + } + } + tmp3 = (PyObject**) op; + tmp2 = *tmp3; + *((PyObject **)op) = tmp; + Py_XDECREF(tmp2); +} diff --git a/numpy/core/src/multiarray/vdot.h b/numpy/core/src/multiarray/vdot.h new file mode 100644 index 000000000..0f60ca6d1 --- /dev/null +++ b/numpy/core/src/multiarray/vdot.h @@ -0,0 +1,18 @@ +#ifndef _NPY_VDOT_H_ +#define _NPY_VDOT_H_ + +#include "common.h" + +NPY_NO_EXPORT void +CFLOAT_vdot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +NPY_NO_EXPORT void +CDOUBLE_vdot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +NPY_NO_EXPORT void +CLONGDOUBLE_vdot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +NPY_NO_EXPORT void +OBJECT_vdot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); + +#endif diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src index 05af0b132..b7f28bb39 100644 --- a/numpy/core/src/npymath/npy_math.c.src +++ b/numpy/core/src/npymath/npy_math.c.src @@ -343,6 +343,7 @@ double npy_log2(double x) * asinh, acosh, atanh * * hypot, atan2, pow, fmod, modf + * ldexp, frexp * * We assume the above are always available in their double versions. * @@ -405,6 +406,26 @@ double npy_log2(double x) } #endif +#ifdef ldexp@c@ +#undef ldexp@c@ +#endif +#ifndef HAVE_LDEXP@C@ +@type@ npy_ldexp@c@(@type@ x, int exp) +{ + return (@type@) npy_ldexp((double)x, exp); +} +#endif + +#ifdef frexp@c@ +#undef frexp@c@ +#endif +#ifndef HAVE_FREXP@C@ +@type@ npy_frexp@c@(@type@ x, int* exp) +{ + return (@type@) npy_frexp(x, exp); +} +#endif + /**end repeat**/ @@ -451,6 +472,43 @@ double npy_log2(double x) } #endif +#ifdef HAVE_LDEXP@C@ +@type@ npy_ldexp@c@(@type@ x, int exp) +{ + return ldexp@c@(x, exp); +} +#endif + +#ifdef HAVE_FREXP@C@ +@type@ npy_frexp@c@(@type@ x, int* exp) +{ + return frexp@c@(x, exp); +} +#endif + +/* C99 but not mandatory */ + +#ifndef HAVE_CBRT@C@ +@type@ npy_cbrt@c@(@type@ x) +{ + /* don't set invalid flag */ + if (npy_isnan(x)) { + return NPY_NAN; + } + else if (x < 0) { + return -npy_pow@c@(-x, 1. / 3.); + } + else { + return npy_pow@c@(x, 1. 
/ 3.); + } +} +#else +@type@ npy_cbrt@c@(@type@ x) +{ + return cbrt@c@(x); +} +#endif + /**end repeat**/ @@ -491,31 +549,43 @@ double npy_log2(double x) @type@ npy_logaddexp@c@(@type@ x, @type@ y) { - const @type@ tmp = x - y; - if (tmp > 0) { - return x + npy_log1p@c@(npy_exp@c@(-tmp)); - } - else if (tmp <= 0) { - return y + npy_log1p@c@(npy_exp@c@(tmp)); + if (x == y) { + /* Handles infinities of the same sign without warnings */ + return x + LOGE2; } else { - /* NaNs, or infinities of the same sign involved */ - return x + y; + const @type@ tmp = x - y; + if (tmp > 0) { + return x + npy_log1p@c@(npy_exp@c@(-tmp)); + } + else if (tmp <= 0) { + return y + npy_log1p@c@(npy_exp@c@(tmp)); + } + else { + /* NaNs */ + return tmp; + } } } @type@ npy_logaddexp2@c@(@type@ x, @type@ y) { - const @type@ tmp = x - y; - if (tmp > 0) { - return x + npy_log2_1p@c@(npy_exp2@c@(-tmp)); - } - else if (tmp <= 0) { - return y + npy_log2_1p@c@(npy_exp2@c@(tmp)); + if (x == y) { + /* Handles infinities of the same sign without warnings */ + return x + 1; } else { - /* NaNs, or infinities of the same sign involved */ - return x + y; + const @type@ tmp = x - y; + if (tmp > 0) { + return x + npy_log2_1p@c@(npy_exp2@c@(-tmp)); + } + else if (tmp <= 0) { + return y + npy_log2_1p@c@(npy_exp2@c@(tmp)); + } + else { + /* NaNs */ + return tmp; + } } } diff --git a/numpy/core/src/npysort/heapsort.c.src b/numpy/core/src/npysort/heapsort.c.src index 84c9d7bd4..cfdd3fd2a 100644 --- a/numpy/core/src/npysort/heapsort.c.src +++ b/numpy/core/src/npysort/heapsort.c.src @@ -28,9 +28,9 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include <stdlib.h> #include "npy_sort.h" #include "npysort_common.h" +#include <stdlib.h> #define NOT_USED NPY_UNUSED(unused) #define PYA_QS_STACK 100 @@ -184,6 +184,10 @@ heapsort_@suff@(@type@ *start, npy_intp n, PyArrayObject *arr) @type@ *a = start - len; npy_intp i, j, l; + if (tmp == NULL) { + return -NPY_ENOMEM; + } + for (l = n>>1; l > 0; --l) { @TYPE@_COPY(tmp, a + l*len, len); for (i = l, j = l<<1; j <= n;) { diff --git a/numpy/core/src/npysort/mergesort.c.src b/numpy/core/src/npysort/mergesort.c.src index 7f98c4016..c99c0e614 100644 --- a/numpy/core/src/npysort/mergesort.c.src +++ b/numpy/core/src/npysort/mergesort.c.src @@ -28,9 +28,9 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include <stdlib.h> #include "npy_sort.h" #include "npysort_common.h" +#include <stdlib.h> #define NOT_USED NPY_UNUSED(unused) #define PYA_QS_STACK 100 diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src index 272615ab3..a27530eb4 100644 --- a/numpy/core/src/npysort/quicksort.c.src +++ b/numpy/core/src/npysort/quicksort.c.src @@ -28,9 +28,9 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include <stdlib.h> #include "npy_sort.h" #include "npysort_common.h" +#include <stdlib.h> #define NOT_USED NPY_UNUSED(unused) #define PYA_QS_STACK 100 diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src index 920c07ec6..4167b2694 100644 --- a/numpy/core/src/npysort/selection.c.src +++ b/numpy/core/src/npysort/selection.c.src @@ -390,7 +390,10 @@ int /* move pivot into position */ SWAP(SORTEE(low), SORTEE(hh)); - store_pivot(hh, kth, pivots, npiv); + /* kth pivot stored later */ + if (hh != kth) { + store_pivot(hh, kth, pivots, npiv); + } if (hh >= kth) high = hh - 1; @@ -400,10 +403,11 @@ int /* two elements */ if (high == low + 1) { - if (@TYPE@_LT(v[IDX(high)], v[IDX(low)])) + if (@TYPE@_LT(v[IDX(high)], v[IDX(low)])) { 
             SWAP(SORTEE(high), SORTEE(low))
-            store_pivot(low, kth, pivots, npiv);
+        }
         }
+        store_pivot(kth, kth, pivots, npiv);
         return 0;
     }
diff --git a/numpy/core/blasdot/cblas.h b/numpy/core/src/private/npy_cblas.h
index 25de09edf..a083f3bcc 100644
--- a/numpy/core/blasdot/cblas.h
+++ b/numpy/core/src/private/npy_cblas.h
@@ -1,5 +1,11 @@
-#ifndef CBLAS_H
-#define CBLAS_H
+/*
+ * This header provides NumPy with a consistent interface to CBLAS code. It is
+ * needed because not all CBLAS providers ship a cblas.h. For instance, MKL
+ * provides mkl_cblas.h and also typedefs the CBLAS_XXX enums.
+ */
+#ifndef _NPY_CBLAS_H_
+#define _NPY_CBLAS_H_
+
 #include <stddef.h>

 /* Allow the use in C++ code. */
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 453dbd065..882913e2f 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -10,15 +10,16 @@
 #undef HAVE_HYPOT
 #endif

-/* Safe to use ldexp and frexp for long double for MSVC builds */
-#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE) || defined(_MSC_VER)
-    #ifdef HAVE_LDEXP
-        #define HAVE_LDEXPL 1
-    #endif
-    #ifdef HAVE_FREXP
-        #define HAVE_FREXPL 1
-    #endif
-#endif
+/*
+ * Largest alignment the copy loops might require. It is needed because
+ * string, void and complex types may be copied using larger instructions
+ * than required to operate on them, e.g. complex float is copied in 8 byte
+ * moves but arithmetic on it only loads in 4 byte moves. The sparc platform
+ * may need that alignment for long doubles. amd64 is not harmed much by the
+ * bloat as the system provides 16 byte alignment by default.
+ */
+#define NPY_MAX_COPY_ALIGNMENT 16

 /* Disable broken Sun Workshop Pro math functions */
 #ifdef __SUNPRO_C
diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h
index 6b0f73fcf..c47c46a66 100644
--- a/numpy/core/src/private/ufunc_override.h
+++ b/numpy/core/src/private/ufunc_override.h
@@ -26,6 +26,7 @@ normalize___call___args(PyUFuncObject *ufunc, PyObject *args,
         else {
             obj = PyTuple_GetSlice(args, nin, nargs);
             PyDict_SetItemString(*normal_kwds, "out", obj);
+            Py_DECREF(obj);
         }
     }
 }
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index 9df39e41f..3aad44c9f 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -55,8 +55,6 @@ npy_ObjectPower(PyObject *x, PyObject *y)
     return PyNumber_Power(x, y, Py_None);
 }

-
-#if defined(NPY_PY3K)
 /**begin repeat
  * #Kind = Max, Min#
  * #OP = Py_GE, Py_LE#
@@ -82,33 +80,6 @@ npy_Object@Kind@(PyObject *i1, PyObject *i2)
 }
 /**end repeat**/

-#else
-/**begin repeat
- * #Kind = Max, Min#
- * #OP = >=, <=#
- */
-static PyObject *
-npy_Object@Kind@(PyObject *i1, PyObject *i2)
-{
-    PyObject *result;
-    int cmp;
-
-    if (PyObject_Cmp(i1, i2, &cmp) < 0) {
-        return NULL;
-    }
-    if (cmp @OP@ 0) {
-        result = i1;
-    }
-    else {
-        result = i2;
-    }
-    Py_INCREF(result);
-    return result;
-}
-/**end repeat**/
-#endif
-
-
 /* Emulates Python's 'a or b' behavior */
 static PyObject *
 npy_ObjectLogicalOr(PyObject *i1, PyObject *i2)
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 89f1206b4..a69fc8147 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -47,20 +47,23 @@
 *****************************************************************************
 */

+/* unary loop input and output contiguous */
+#define IS_UNARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
+                                  steps[1] == sizeof(tout))
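
Earlier in this patch, the selection.c change makes np.partition record the kth pivot exactly once, after it has reached its final position. A minimal NumPy-level sketch of the invariant that bookkeeping has to preserve (array values chosen arbitrarily for the demo):

    import numpy as np

    a = np.array([7, 2, 9, 1, 5, 0, 8, 3])
    kth = 3
    p = np.partition(a, kth)

    # the kth element ends up in its sorted position ...
    assert p[kth] == np.sort(a)[kth]
    # ... with smaller-or-equal values before it, larger-or-equal after
    assert (p[:kth] <= p[kth]).all() and (p[kth:] >= p[kth]).all()

    # a tuple of kth values exercises the stored-pivot path, which reuses
    # pivots from earlier selections to bound the later ones
    p2 = np.partition(a, (1, 4, 6))
    assert (p2[[1, 4, 6]] == np.sort(a)[[1, 4, 6]]).all()
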
 #define IS_BINARY_REDUCE ((args[0] == args[2])\
         && (steps[0] == steps[2])\
         && (steps[0] == 0))

-/* binary loop input and output continous */
+/* binary loop input and output contiguous */
 #define IS_BINARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
                                    steps[1] == sizeof(tin) && \
                                    steps[2] == sizeof(tout))

-/* binary loop input and output continous with first scalar */
+/* binary loop input and output contiguous with first scalar */
 #define IS_BINARY_CONT_S1(tin, tout) (steps[0] == 0 && \
                                    steps[1] == sizeof(tin) && \
                                    steps[2] == sizeof(tout))

-/* binary loop input and output continous with second scalar */
+/* binary loop input and output contiguous with second scalar */
 #define IS_BINARY_CONT_S2(tin, tout) (steps[0] == sizeof(tin) && \
                                    steps[1] == 0 && \
                                    steps[2] == sizeof(tout))

@@ -79,6 +82,33 @@
     npy_intp i;\
     for(i = 0; i < n; i++, ip1 += is1, op1 += os1)

+/*
+ * loop with contiguous specialization
+ * op should be the code working on `tin in` and
+ * storing the result in `tout * out`
+ * combine with NPY_GCC_OPT_3 to allow autovectorization
+ * should only be used where it's worthwhile to avoid code bloat
+ */
+#define UNARY_LOOP_FAST(tin, tout, op) \
+    do { \
+        /* condition allows compiler to optimize the generic macro */ \
+        if (IS_UNARY_CONT(tin, tout)) { \
+            UNARY_LOOP { \
+                const tin in = *(tin *)ip1; \
+                tout * out = (tout *)op1; \
+                op; \
+            } \
+        } \
+        else { \
+            UNARY_LOOP { \
+                const tin in = *(tin *)ip1; \
+                tout * out = (tout *)op1; \
+                op; \
+            } \
+        } \
+    } \
+    while (0)
+
 #define UNARY_LOOP_TWO_OUT\
     char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\
     npy_intp is1 = steps[0], os1 = steps[1], os2 = steps[2];\
@@ -93,6 +123,51 @@
     npy_intp i;\
     for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1)

+/*
+ * loop with contiguous specialization
+ * op should be the code working on `tin in1`, `tin in2` and
+ * storing the result in `tout * out`
+ * combine with NPY_GCC_OPT_3 to allow autovectorization
+ * should only be used where it's worthwhile to avoid code bloat
+ */
+#define BINARY_LOOP_FAST(tin, tout, op) \
+    do { \
+        /* condition allows compiler to optimize the generic macro */ \
+        if (IS_BINARY_CONT(tin, tout)) { \
+            BINARY_LOOP { \
+                const tin in1 = *(tin *)ip1; \
+                const tin in2 = *(tin *)ip2; \
+                tout * out = (tout *)op1; \
+                op; \
+            } \
+        } \
+        else if (IS_BINARY_CONT_S1(tin, tout)) { \
+            const tin in1 = *(tin *)args[0]; \
+            BINARY_LOOP { \
+                const tin in2 = *(tin *)ip2; \
+                tout * out = (tout *)op1; \
+                op; \
+            } \
+        } \
+        else if (IS_BINARY_CONT_S2(tin, tout)) { \
+            const tin in2 = *(tin *)args[1]; \
+            BINARY_LOOP { \
+                const tin in1 = *(tin *)ip1; \
+                tout * out = (tout *)op1; \
+                op; \
+            } \
+        } \
+        else { \
+            BINARY_LOOP { \
+                const tin in1 = *(tin *)ip1; \
+                const tin in2 = *(tin *)ip2; \
+                tout * out = (tout *)op1; \
+                op; \
+            } \
+        } \
+    } \
+    while (0)
+
 #define BINARY_REDUCE_LOOP_INNER\
     char *ip2 = args[1]; \
     npy_intp is2 = steps[1]; \
@@ -703,58 +778,40 @@ NPY_NO_EXPORT void
     }
 }

-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
 @TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
 {
-    UNARY_LOOP {
-        const @type@ in1 = *(@type@ *)ip1;
-        *((@type@ *)op1) = in1*in1;
-    }
+    UNARY_LOOP_FAST(@type@, @type@, *out = in * in);
 }

 NPY_NO_EXPORT void
 @TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
 {
-    UNARY_LOOP {
-        const @type@ in1 = *(@type@ *)ip1;
-        *((@type@ *)op1) = (@type@)(1.0/in1);
-    }
+    UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in);
 }

 NPY_NO_EXPORT void
@TYPE@_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1; - } + UNARY_LOOP_FAST(@type@, @type@, *out = in); } -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = (@type@)(-(@type@)in1); - } + UNARY_LOOP_FAST(@type@, @type@, *out = -in); } -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((npy_bool *)op1) = !in1; - } + UNARY_LOOP_FAST(@type@, npy_bool, *out = !in); } -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_invert(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = ~in1; - } + UNARY_LOOP_FAST(@type@, @type@, *out = ~in); } /**begin repeat1 @@ -764,7 +821,7 @@ NPY_NO_EXPORT void * #OP = +, -,*, &, |, ^, <<, >># */ -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { if(IS_BINARY_REDUCE) { @@ -774,11 +831,7 @@ NPY_NO_EXPORT void *((@type@ *)iop1) = io1; } else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = in1 @OP@ in2; - } + BINARY_LOOP_FAST(@type@, @type@, *out = in1 @OP@ in2); } } @@ -797,39 +850,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void * gcc vectorization of this is not good (PR60575) but manual integer * vectorization is too tedious to be worthwhile */ - if (IS_BINARY_CONT(@type@, npy_bool)) { - npy_intp i, n = dimensions[0]; - @type@ * a = (@type@ *)args[0], * b = (@type@ *)args[1]; - npy_bool * o = (npy_bool *)args[2]; - for (i = 0; i < n; i++) { - o[i] = a[i] @OP@ b[i]; - } - } - else if (IS_BINARY_CONT_S1(@type@, npy_bool)) { - npy_intp i, n = dimensions[0]; - @type@ a = *(@type@ *)args[0]; - @type@ * b = (@type@ *)args[1]; - npy_bool * o = (npy_bool *)args[2]; - for (i = 0; i < n; i++) { - o[i] = a @OP@ b[i]; - } - } - else if (IS_BINARY_CONT_S2(@type@, npy_bool)) { - npy_intp i, n = dimensions[0]; - @type@ * a = (@type@ *)args[0]; - @type@ b = *(@type@*)args[1]; - npy_bool * o = (npy_bool *)args[2]; - for (i = 0; i < n; i++) { - o[i] = a[i] @OP@ b; - } - } - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((npy_bool *)op1) = in1 @OP@ in2; - } - } + BINARY_LOOP_FAST(@type@, npy_bool, *out = in1 @OP@ in2); } /**end repeat1**/ @@ -838,9 +859,9 @@ NPY_NO_EXPORT void @TYPE@_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((npy_bool *)op1)= (in1 && !in2) || (!in1 && in2); + const int t1 = !!*(@type@ *)ip1; + const int t2 = !!*(@type@ *)ip2; + *((npy_bool *)op1) = (t1 != t2); } } @@ -914,22 +935,16 @@ NPY_NO_EXPORT void * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# */ -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1; - } + UNARY_LOOP_FAST(@type@, @type@, *out = (in >= 0) ? 
in : -in); } -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); - } + UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0)); } NPY_NO_EXPORT void @@ -997,13 +1012,10 @@ NPY_NO_EXPORT void } } -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : 0; - } + UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : 0); } NPY_NO_EXPORT void @@ -1515,9 +1527,9 @@ NPY_NO_EXPORT void @TYPE@_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((npy_bool *)op1)= (in1 && !in2) || (!in1 && in2); + const int t1 = !!*(@type@ *)ip1; + const int t2 = !!*(@type@ *)ip2; + *((npy_bool *)op1) = (t1 != t2); } } @@ -1743,25 +1755,22 @@ NPY_NO_EXPORT void } } -#ifdef HAVE_FREXP@C@ NPY_NO_EXPORT void @TYPE@_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { UNARY_LOOP_TWO_OUT { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = frexp@c@(in1, (int *)op2); + *((@type@ *)op1) = npy_frexp@c@(in1, (int *)op2); } } -#endif -#ifdef HAVE_LDEXP@C@ NPY_NO_EXPORT void @TYPE@_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const int in2 = *(int *)ip2; - *((@type@ *)op1) = ldexp@c@(in1, in2); + *((@type@ *)op1) = npy_ldexp@c@(in1, in2); } } @@ -1778,7 +1787,7 @@ NPY_NO_EXPORT void const long in2 = *(long *)ip2; if (((int)in2) == in2) { /* Range OK */ - *((@type@ *)op1) = ldexp@c@(in1, ((int)in2)); + *((@type@ *)op1) = npy_ldexp@c@(in1, ((int)in2)); } else { /* @@ -1786,15 +1795,14 @@ NPY_NO_EXPORT void * given that exponent has less bits than npy_int. 
*/ if (in2 > 0) { - *((@type@ *)op1) = ldexp@c@(in1, NPY_MAX_INT); + *((@type@ *)op1) = npy_ldexp@c@(in1, NPY_MAX_INT); } else { - *((@type@ *)op1) = ldexp@c@(in1, NPY_MIN_INT); + *((@type@ *)op1) = npy_ldexp@c@(in1, NPY_MIN_INT); } } } } -#endif #define @TYPE@_true_divide @TYPE@_divide @@ -1868,7 +1876,7 @@ HALF_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_U BINARY_LOOP { const int in1 = !npy_half_iszero(*(npy_half *)ip1); const int in2 = !npy_half_iszero(*(npy_half *)ip2); - *((npy_bool *)op1)= (in1 && !in2) || (!in1 && in2); + *((npy_bool *)op1) = (in1 != in2); } } @@ -2059,25 +2067,22 @@ HALF_modf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(f } } -#ifdef HAVE_FREXPF NPY_NO_EXPORT void HALF_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { UNARY_LOOP_TWO_OUT { const float in1 = npy_half_to_float(*(npy_half *)ip1); - *((npy_half *)op1) = npy_float_to_half(frexpf(in1, (int *)op2)); + *((npy_half *)op1) = npy_float_to_half(npy_frexpf(in1, (int *)op2)); } } -#endif -#ifdef HAVE_LDEXPF NPY_NO_EXPORT void HALF_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const float in1 = npy_half_to_float(*(npy_half *)ip1); const int in2 = *(int *)ip2; - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, in2)); + *((npy_half *)op1) = npy_float_to_half(npy_ldexpf(in1, in2)); } } @@ -2094,7 +2099,7 @@ HALF_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN const long in2 = *(long *)ip2; if (((int)in2) == in2) { /* Range OK */ - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, ((int)in2))); + *((npy_half *)op1) = npy_float_to_half(npy_ldexpf(in1, ((int)in2))); } else { /* @@ -2102,15 +2107,14 @@ HALF_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN * given that exponent has less bits than npy_int. */ if (in2 > 0) { - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, NPY_MAX_INT)); + *((npy_half *)op1) = npy_float_to_half(npy_ldexpf(in1, NPY_MAX_INT)); } else { - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, NPY_MIN_INT)); + *((npy_half *)op1) = npy_float_to_half(npy_ldexpf(in1, NPY_MIN_INT)); } } } } -#endif #define HALF_true_divide HALF_divide @@ -2354,7 +2358,7 @@ NPY_NO_EXPORT void const @ftype@ in2i = ((@ftype@ *)ip2)[1]; const npy_bool tmp1 = (in1r || in1i); const npy_bool tmp2 = (in2r || in2i); - *((npy_bool *)op1) = (tmp1 && !tmp2) || (!tmp1 && tmp2); + *((npy_bool *)op1) = tmp1 != tmp2; } } @@ -2572,6 +2576,7 @@ OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS return; } ret = PyObject_IsTrue(ret_obj); + Py_DECREF(ret_obj); if (ret == -1) { #if @identity@ != -1 if (in1 == in2) { @@ -2621,6 +2626,7 @@ OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED } ret = PyLong_FromLong(v); if (PyErr_Occurred()) { + Py_DECREF(zero); return; } Py_XDECREF(*out); @@ -2635,6 +2641,7 @@ OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED PyObject *ret = PyInt_FromLong( PyObject_Compare(in1 ? 
in1 : Py_None, zero)); if (PyErr_Occurred()) { + Py_DECREF(zero); return; } Py_XDECREF(*out); diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index fdc9230de..a6e775a3a 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -248,17 +248,13 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @TYPE@_modf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); -#ifdef HAVE_FREXP@C@ NPY_NO_EXPORT void @TYPE@_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); -#endif -#ifdef HAVE_LDEXP@C@ NPY_NO_EXPORT void @TYPE@_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); NPY_NO_EXPORT void @TYPE@_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); -#endif #define @TYPE@_true_divide @TYPE@_divide diff --git a/numpy/core/src/umath/operand_flag_tests.c.src b/numpy/core/src/umath/operand_flag_tests.c.src index 4fb428bfc..046c37595 100644 --- a/numpy/core/src/umath/operand_flag_tests.c.src +++ b/numpy/core/src/umath/operand_flag_tests.c.src @@ -1,11 +1,11 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include <math.h> #include <Python.h> -#include <structmember.h> #include <numpy/arrayobject.h> #include <numpy/ufuncobject.h> #include "numpy/npy_3kcompat.h" +#include <math.h> +#include <structmember.h> static PyMethodDef TestMethods[] = { diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 92dc0c659..5b111eb0d 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -37,7 +37,9 @@ ((abs(args[1] - args[0]) >= (vsize)) || ((abs(args[1] - args[0]) == 0)))) #define IS_BLOCKABLE_REDUCE(esize, vsize) \ - (steps[1] == (esize) && abs(args[1] - args[0]) >= (vsize)) + (steps[1] == (esize) && abs(args[1] - args[0]) >= (vsize) && \ + npy_is_aligned(args[1], (esize)) && \ + npy_is_aligned(args[0], (esize))) #define IS_BLOCKABLE_BINARY(esize, vsize) \ (steps[0] == steps[1] && steps[1] == steps[2] && steps[2] == (esize) && \ @@ -480,14 +482,18 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i /**end repeat1**/ -/* compress 4 vectors to 4/8 bytes in op with filled with 0 or 1 */ +/* + * compress 4 vectors to 4/8 bytes in op with filled with 0 or 1 + * the last vector is passed as a pointer as MSVC 2010 is unable to ignore the + * calling convention leading to C2719 on 32 bit, see #4795 + */ static NPY_INLINE void -sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ r4, +sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ * r4, npy_bool * op) { const __m128i mask = @vpre@_set1_epi8(0x1); __m128i ir1 = @vpre@_packs_epi32(@cast@(r1), @cast@(r2)); - __m128i ir2 = @vpre@_packs_epi32(@cast@(r3), @cast@(r4)); + __m128i ir2 = @vpre@_packs_epi32(@cast@(r3), @cast@(*r4)); __m128i rr = @vpre@_packs_epi16(ir1, ir2); #if @double@ rr = @vpre@_packs_epi16(rr, rr); @@ -535,7 +541,7 @@ sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n) @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, b); @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, c); @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d, d); - sse2_compress4_to_byte_@TYPE@(r1, r2, r3, r4, &op[i]); + sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); } } else { @@ -552,7 +558,7 @@ sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n) @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2); @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2); @vtype@ r4 = 
@vpre@_@VOP@_@vsuf@(d1, d2); - sse2_compress4_to_byte_@TYPE@(r1, r2, r3, r4, &op[i]); + sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); } } LOOP_BLOCKED_END { @@ -577,7 +583,7 @@ sse2_binary_scalar1_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy @vtype@ r2 = @vpre@_@VOP@_@vsuf@(s, b); @vtype@ r3 = @vpre@_@VOP@_@vsuf@(s, c); @vtype@ r4 = @vpre@_@VOP@_@vsuf@(s, d); - sse2_compress4_to_byte_@TYPE@(r1, r2, r3, r4, &op[i]); + sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); } LOOP_BLOCKED_END { op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[0], ip2[i]); @@ -601,7 +607,7 @@ sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, s); @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, s); @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d, s); - sse2_compress4_to_byte_@TYPE@(r1, r2, r3, r4, &op[i]); + sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); } LOOP_BLOCKED_END { op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[0]); diff --git a/numpy/core/src/umath/test_rational.c.src b/numpy/core/src/umath/test_rational.c.src index 5ce792266..5c4f29f73 100644 --- a/numpy/core/src/umath/test_rational.c.src +++ b/numpy/core/src/umath/test_rational.c.src @@ -2,12 +2,12 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include <math.h> #include <Python.h> #include <structmember.h> #include <numpy/arrayobject.h> #include <numpy/ufuncobject.h> #include <numpy/npy_3kcompat.h> +#include <math.h> /* Relevant arithmetic exceptions */ diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index b8098531a..dc5065f14 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -73,7 +73,7 @@ static int _does_loop_use_arrays(void *data); static int -_extract_pyvals(PyObject *ref, char *name, int *bufsize, +_extract_pyvals(PyObject *ref, const char *name, int *bufsize, int *errmask, PyObject **errobj); static int @@ -237,7 +237,7 @@ static int PyUFunc_NUM_NODEFAULTS = 0; #endif static PyObject * -_get_global_ext_obj(char * name) +get_global_ext_obj(void) { PyObject *thedict; PyObject *ref = NULL; @@ -259,12 +259,12 @@ _get_global_ext_obj(char * name) static int -_get_bufsize_errmask(PyObject * extobj, char * ufunc_name, +_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name, int *buffersize, int *errormask) { /* Get the buffersize and errormask */ if (extobj == NULL) { - extobj = _get_global_ext_obj(ufunc_name); + extobj = get_global_ext_obj(); } if (_extract_pyvals(extobj, ufunc_name, buffersize, errormask, NULL) < 0) { @@ -430,7 +430,7 @@ _find_array_prepare(PyObject *args, PyObject *kwds, * if an error handling method is 'call' */ static int -_extract_pyvals(PyObject *ref, char *name, int *bufsize, +_extract_pyvals(PyObject *ref, const char *name, int *bufsize, int *errmask, PyObject **errobj) { PyObject *retval; @@ -518,41 +518,41 @@ _extract_pyvals(PyObject *ref, char *name, int *bufsize, NPY_NO_EXPORT int PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject **errobj) { - PyObject *ref = _get_global_ext_obj(name); + PyObject *ref = get_global_ext_obj(); return _extract_pyvals(ref, name, bufsize, errmask, errobj); } -#define _GETATTR_(str, rstr) do {if (strcmp(name, #str) == 0) \ +#define GETATTR(str, rstr) do {if (strcmp(name, #str) == 0) \ return PyObject_HasAttrString(op, "__" #rstr "__");} while (0); static int -_has_reflected_op(PyObject *op, char *name) +_has_reflected_op(PyObject *op, const char *name) { - _GETATTR_(add, radd); - _GETATTR_(subtract, rsub); 
- _GETATTR_(multiply, rmul); - _GETATTR_(divide, rdiv); - _GETATTR_(true_divide, rtruediv); - _GETATTR_(floor_divide, rfloordiv); - _GETATTR_(remainder, rmod); - _GETATTR_(power, rpow); - _GETATTR_(left_shift, rlshift); - _GETATTR_(right_shift, rrshift); - _GETATTR_(bitwise_and, rand); - _GETATTR_(bitwise_xor, rxor); - _GETATTR_(bitwise_or, ror); + GETATTR(add, radd); + GETATTR(subtract, rsub); + GETATTR(multiply, rmul); + GETATTR(divide, rdiv); + GETATTR(true_divide, rtruediv); + GETATTR(floor_divide, rfloordiv); + GETATTR(remainder, rmod); + GETATTR(power, rpow); + GETATTR(left_shift, rlshift); + GETATTR(right_shift, rrshift); + GETATTR(bitwise_and, rand); + GETATTR(bitwise_xor, rxor); + GETATTR(bitwise_or, ror); /* Comparisons */ - _GETATTR_(equal, eq); - _GETATTR_(not_equal, ne); - _GETATTR_(greater, lt); - _GETATTR_(less, gt); - _GETATTR_(greater_equal, le); - _GETATTR_(less_equal, ge); + GETATTR(equal, eq); + GETATTR(not_equal, ne); + GETATTR(greater, lt); + GETATTR(less, gt); + GETATTR(greater_equal, le); + GETATTR(less_equal, ge); return 0; } -#undef _GETATTR_ +#undef GETATTR /* Return the position of next non-white-space char in the string */ @@ -743,16 +743,9 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature) fail: PyArray_free((void*)var_names); if (parse_error) { - char *buf = PyArray_malloc(sizeof(char) * (len + 200)); - if (buf) { - sprintf(buf, "%s at position %d in \"%s\"", - parse_error, i, signature); - PyErr_SetString(PyExc_ValueError, signature); - PyArray_free(buf); - } - else { - PyErr_NoMemory(); - } + PyErr_Format(PyExc_ValueError, + "%s at position %d in \"%s\"", + parse_error, i, signature); } return -1; } @@ -779,7 +772,7 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc, int i, nargs, nin = ufunc->nin; PyObject *obj, *context; PyObject *str_key_obj = NULL; - char *ufunc_name; + const char *ufunc_name; int type_num; int any_flexible = 0, any_object = 0, any_flexible_userloops = 0; @@ -927,7 +920,8 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc, } #endif - if (PyBytes_AsStringAndSize(key, &str, &length) == -1) { + if (PyBytes_AsStringAndSize(key, &str, &length) < 0) { + PyErr_Clear(); PyErr_SetString(PyExc_TypeError, "invalid keyword argument"); goto fail; } @@ -1762,7 +1756,7 @@ make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds) * - ufunc_name: name of ufunc */ static int -_check_ufunc_fperr(int errmask, PyObject *extobj, char* ufunc_name) { +_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) { int fperr; PyObject *errobj = NULL; int ret; @@ -1778,7 +1772,7 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, char* ufunc_name) { /* Get error object globals */ if (extobj == NULL) { - extobj = _get_global_ext_obj(ufunc_name); + extobj = get_global_ext_obj(); } if (_extract_pyvals(extobj, ufunc_name, NULL, NULL, &errobj) < 0) { @@ -1800,7 +1794,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, { int nin, nout; int i, j, idim, nop; - char *ufunc_name; + const char *ufunc_name; int retval = -1, subok = 1; int needs_api = 0; @@ -1907,60 +1901,67 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, } /* - * Validate the core dimensions of all the operands, - * and collect all of the labeled core dimension sizes - * into the array 'core_dim_sizes'. Initialize them to - * 1, for example in the case where the operand broadcasts - * to a core dimension, it won't be visited. + * Validate the core dimensions of all the operands, and collect all of + * the labelled core dimensions into 'core_dim_sizes'. 
+ * + * The behavior has been changed in Numpy 1.10.0, and the following + * requirements must be fulfilled or an error will be raised: + * * Arguments, both input and output, must have at least as many + * dimensions as the corresponding number of core dimensions. In + * previous versions, 1's were prepended to the shape as needed. + * * Core dimensions with same labels must have exactly matching sizes. + * In previous versions, core dimensions of size 1 would broadcast + * against other core dimensions with the same label. + * * All core dimensions must have their size specified by a passed in + * input or output argument. In previous versions, core dimensions in + * an output argument that were not specified in an input argument, + * and whose size could not be inferred from a passed in output + * argument, would have their size set to 1. */ for (i = 0; i < ufunc->core_num_dim_ix; ++i) { - core_dim_sizes[i] = 1; + core_dim_sizes[i] = -1; } for (i = 0; i < nop; ++i) { if (op[i] != NULL) { int dim_offset = ufunc->core_offsets[i]; int num_dims = ufunc->core_num_dims[i]; int core_start_dim = PyArray_NDIM(op[i]) - num_dims; - /* Make sure any output operand has enough dimensions */ - if (i >= nin && core_start_dim < 0) { + + /* Check if operands have enough dimensions */ + if (core_start_dim < 0) { PyErr_Format(PyExc_ValueError, - "%s: Output operand %d does not have enough dimensions " - "(has %d, gufunc core with signature %s " - "requires %d)", - ufunc_name, i - nin, PyArray_NDIM(op[i]), + "%s: %s operand %d does not have enough " + "dimensions (has %d, gufunc core with " + "signature %s requires %d)", + ufunc_name, i < nin ? "Input" : "Output", + i < nin ? i : i - nin, PyArray_NDIM(op[i]), ufunc->core_signature, num_dims); retval = -1; goto fail; } /* - * Make sure each core dimension matches all other core - * dimensions with the same label - * - * NOTE: For input operands, core_start_dim may be negative. - * In that case, the operand is being broadcast onto - * core dimensions. For example, a scalar will broadcast - * to fit any core signature. + * Make sure every core dimension exactly matches all other core + * dimensions with the same label. */ - if (core_start_dim >= 0) { - idim = 0; - } else { - idim = -core_start_dim; - } - for (; idim < num_dims; ++idim) { - int core_dim_index = ufunc->core_dim_ixs[dim_offset + idim]; + for (idim = 0; idim < num_dims; ++idim) { + int core_dim_index = ufunc->core_dim_ixs[dim_offset+idim]; npy_intp op_dim_size = - PyArray_SHAPE(op[i])[core_start_dim + idim]; - if (core_dim_sizes[core_dim_index] == 1) { + PyArray_DIM(op[i], core_start_dim+idim); + + if (core_dim_sizes[core_dim_index] == -1) { core_dim_sizes[core_dim_index] = op_dim_size; - } else if ((i >= nin || op_dim_size != 1) && - core_dim_sizes[core_dim_index] != op_dim_size) { + } + else if (op_dim_size != core_dim_sizes[core_dim_index]) { PyErr_Format(PyExc_ValueError, - "%s: Operand %d has a mismatch in its core " - "dimension %d, with gufunc signature %s " - "(size %zd is different from %zd)", - ufunc_name, i, idim, ufunc->core_signature, - op_dim_size, core_dim_sizes[core_dim_index]); + "%s: %s operand %d has a mismatch in its " + "core dimension %d, with gufunc " + "signature %s (size %zd is different " + "from %zd)", + ufunc_name, i < nin ? "Input" : "Output", + i < nin ? 
i : i - nin, idim, + ufunc->core_signature, op_dim_size, + core_dim_sizes[core_dim_index]); retval = -1; goto fail; } @@ -1968,6 +1969,44 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, } } + /* + * Make sure no core dimension is unspecified. + */ + for (i = 0; i < ufunc->core_num_dim_ix; ++i) { + if (core_dim_sizes[i] == -1) { + break; + } + } + if (i != ufunc->core_num_dim_ix) { + /* + * There is at least one core dimension missing, find in which + * operand it comes up first (it has to be an output operand). + */ + const int missing_core_dim = i; + int out_op; + for (out_op = nin; out_op < nop; ++out_op) { + int first_idx = ufunc->core_offsets[out_op]; + int last_idx = first_idx + ufunc->core_num_dims[out_op]; + for (i = first_idx; i < last_idx; ++i) { + if (ufunc->core_dim_ixs[i] == missing_core_dim) { + break; + } + } + if (i < last_idx) { + /* Change index offsets for error message */ + out_op -= nin; + i -= first_idx; + break; + } + } + PyErr_Format(PyExc_ValueError, + "%s: Output operand %d has core dimension %d " + "unspecified, with gufunc signature %s", + ufunc_name, out_op, i, ufunc->core_signature); + retval = -1; + goto fail; + } + /* Fill in the initial part of 'iter_shape' */ for (idim = 0; idim < broadcast_ndim; ++idim) { iter_shape[idim] = -1; @@ -2325,7 +2364,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc, { int nin, nout; int i, nop; - char *ufunc_name; + const char *ufunc_name; int retval = -1, subok = 1; int need_fancy = 0; @@ -2640,7 +2679,7 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr, int i, retcode; PyArrayObject *op[3] = {arr, arr, NULL}; PyArray_Descr *dtypes[3] = {NULL, NULL, NULL}; - char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; + const char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; PyObject *type_tup = NULL; *out_dtype = NULL; @@ -2816,7 +2855,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, PyArray_Descr *dtype; PyArrayObject *result; PyArray_AssignReduceIdentityFunc *assign_identity = NULL; - char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; + const char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; @@ -2912,7 +2951,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; - char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; + const char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; /* These parameters come from extobj= or from a TLS global */ int buffersize = 0, errormask = 0; @@ -3265,7 +3304,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; - char *ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; + const char *ufunc_name = ufunc->name ? 
ufunc->name : "(unknown)"; char *opname = "reduceat"; /* These parameters come from extobj= or from a TLS global */ @@ -3750,7 +3789,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, } for (i = 0; i < naxes; ++i) { PyObject *tmp = PyTuple_GET_ITEM(axes_in, i); - long axis = PyInt_AsLong(tmp); + int axis = PyArray_PyIntAsInt(tmp); if (axis == -1 && PyErr_Occurred()) { Py_XDECREF(otype); Py_DECREF(mp); @@ -3771,7 +3810,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, } /* Try to interpret axis as an integer */ else { - long axis = PyInt_AsLong(axes_in); + int axis = PyArray_PyIntAsInt(axes_in); /* TODO: PyNumber_Index would be good to use here */ if (axis == -1 && PyErr_Occurred()) { Py_XDECREF(otype); @@ -4308,7 +4347,7 @@ NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data, char *types, int ntypes, int nin, int nout, int identity, - char *name, char *doc, int check_return) + const char *name, const char *doc, int check_return) { return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, nin, nout, identity, name, doc, check_return, NULL); @@ -4319,7 +4358,7 @@ NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, char *types, int ntypes, int nin, int nout, int identity, - char *name, char *doc, + const char *name, const char *doc, int check_return, const char *signature) { PyUFuncObject *ufunc; @@ -4731,10 +4770,7 @@ ufunc_dealloc(PyUFuncObject *ufunc) static PyObject * ufunc_repr(PyUFuncObject *ufunc) { - char buf[100]; - - sprintf(buf, "<ufunc '%.50s'>", ufunc->name); - return PyUString_FromString(buf); + return PyUString_FromFormat("<ufunc '%s'>", ufunc->name); } diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index ffdb15bbe..ec28bb9e4 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -58,7 +58,7 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc, PyArray_Descr **dtypes) { int i, nin = ufunc->nin, nop = nin + ufunc->nout; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -186,7 +186,7 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int i, type_num1, type_num2; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -292,7 +292,7 @@ PyUFunc_SimpleUnaryOperationTypeResolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int i, type_num1; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -433,7 +433,7 @@ PyUFunc_SimpleBinaryOperationTypeResolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int i, type_num1, type_num2; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -591,7 +591,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc, { int type_num1, type_num2; int i; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -781,7 +781,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc, { int type_num1, type_num2; int i; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? 
ufunc->name : "<unnamed ufunc>"; @@ -963,7 +963,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc, { int type_num1, type_num2; int i; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -1106,7 +1106,7 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, { int type_num1, type_num2; int i; - char *ufunc_name; + const char *ufunc_name; ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>"; @@ -1875,7 +1875,7 @@ linear_search_type_resolver(PyUFuncObject *self, { npy_intp i, j, nin = self->nin, nop = nin + self->nout; int types[NPY_MAXARGS]; - char *ufunc_name; + const char *ufunc_name; int no_castable_output, use_min_scalar; /* For making a better error message on coercion error */ @@ -1984,7 +1984,7 @@ type_tuple_type_resolver(PyUFuncObject *self, npy_intp i, j, n, nin = self->nin, nop = nin + self->nout; int n_specified = 0; int specified_types[NPY_MAXARGS], types[NPY_MAXARGS]; - char *ufunc_name; + const char *ufunc_name; int no_castable_output, use_min_scalar; /* For making a better error message on coercion error */ @@ -2044,7 +2044,7 @@ type_tuple_type_resolver(PyUFuncObject *self, type_tup = str_obj; } - if (!PyBytes_AsStringAndSize(type_tup, &str, &length) < 0) { + if (PyBytes_AsStringAndSize(type_tup, &str, &length) < 0) { Py_XDECREF(str_obj); return -1; } diff --git a/numpy/core/src/umath/umath_tests.c.src b/numpy/core/src/umath/umath_tests.c.src index 46d06288d..33d9846bd 100644 --- a/numpy/core/src/umath/umath_tests.c.src +++ b/numpy/core/src/umath/umath_tests.c.src @@ -38,6 +38,9 @@ INIT_OUTER_LOOP_3 \ npy_intp s3 = *steps++; +#define BEGIN_OUTER_LOOP_2 \ + for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1) { + #define BEGIN_OUTER_LOOP_3 \ for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2) { @@ -58,13 +61,14 @@ char *inner1d_signature = "(i),(i)->()"; /**begin repeat #TYPE=LONG,DOUBLE# - #typ=npy_long, npy_double# + #typ=npy_long,npy_double# */ /* * This implements the function * out[n] = sum_i { in1[n, i] * in2[n, i] }. */ + static void @TYPE@_inner1d(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { @@ -91,7 +95,7 @@ char *innerwt_signature = "(i),(i),(i)->()"; /**begin repeat #TYPE=LONG,DOUBLE# - #typ=npy_long, npy_double# + #typ=npy_long,npy_double# */ @@ -127,7 +131,7 @@ char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)"; /**begin repeat #TYPE=FLOAT,DOUBLE,LONG# - #typ=npy_float, npy_double,npy_long# + #typ=npy_float,npy_double,npy_long# */ /* @@ -135,7 +139,6 @@ char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)"; * out[k, m, p] = sum_n { in1[k, m, n] * in2[k, n, p] }. */ - static void @TYPE@_matrix_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { @@ -177,27 +180,66 @@ static void /**end repeat**/ -/* The following lines were generated using a slightly modified - version of code_generators/generate_umath.py and adding these - lines to defdict: - -defdict = { -'inner1d' : - Ufunc(2, 1, None_, - r'''inner on the last dimension and broadcast on the rest \n" - " \"(i),(i)->()\" \n''', - TD('ld'), - ), -'innerwt' : - Ufunc(3, 1, None_, - r'''inner1d with a weight argument \n" - " \"(i),(i),(i)->()\" \n''', - TD('ld'), - ), -} +char *euclidean_pdist_signature = "(n,d)->(p)"; +/**begin repeat + + #TYPE=FLOAT,DOUBLE# + #typ=npy_float,npy_double# + #sqrt_func=sqrtf,sqrt# */ +/* + * This implements the function + * out[j*(2*n-3-j)+k-1] = sum_d { (in1[j, d] - in1[k, d])^2 } + * with 0 < k < j < n, i.e. 
computes all unique pairwise euclidean distances. + */ + +static void +@TYPE@_euclidean_pdist(char **args, npy_intp *dimensions, npy_intp *steps, + void *NPY_UNUSED(func)) +{ + INIT_OUTER_LOOP_2 + npy_intp len_n = *dimensions++; + npy_intp len_d = *dimensions++; + npy_intp len_p = *dimensions; + npy_intp stride_n = *steps++; + npy_intp stride_d = *steps++; + npy_intp stride_p = *steps; + + assert(len_n * (len_n - 1) / 2 == len_p); + + BEGIN_OUTER_LOOP_2 + const char *data_this = (const char *)args[0]; + char *data_out = args[1]; + npy_intp n; + for (n = 0; n < len_n; ++n) { + const char *data_that = data_this + stride_n; + npy_intp nn; + for (nn = n + 1; nn < len_n; ++nn) { + const char *ptr_this = data_this; + const char *ptr_that = data_that; + @typ@ out = 0; + npy_intp d; + for (d = 0; d < len_d; ++d) { + const @typ@ delta = *(const @typ@ *)ptr_this - + *(const @typ@ *)ptr_that; + out += delta * delta; + ptr_this += stride_d; + ptr_that += stride_d; + } + *(@typ@ *)data_out = @sqrt_func@(out); + data_that += stride_n; + data_out += stride_p; + } + data_this += stride_n; + } + END_OUTER_LOOP +} + +/**end repeat**/ + + static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d }; static void * inner1d_data[] = { (void *)NULL, (void *)NULL }; static char inner1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; @@ -208,39 +250,49 @@ static PyUFuncGenericFunction matrix_multiply_functions[] = { LONG_matrix_multip static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; static char matrix_multiply_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static PyUFuncGenericFunction euclidean_pdist_functions[] = + { FLOAT_euclidean_pdist, DOUBLE_euclidean_pdist }; +static void *eucldiean_pdist_data[] = { (void *)NULL, (void *)NULL }; +static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT, + NPY_DOUBLE, NPY_DOUBLE }; + + static void addUfuncs(PyObject *dictionary) { PyObject *f; - f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data, inner1d_signatures, 2, - 2, 1, PyUFunc_None, "inner1d", - "inner on the last dimension and broadcast on the rest \n"\ - " \"(i),(i)->()\" \n", - 0, inner1d_signature); + f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data, + inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d", + "inner on the last dimension and broadcast on the rest \n" + " \"(i),(i)->()\" \n", + 0, inner1d_signature); PyDict_SetItemString(dictionary, "inner1d", f); Py_DECREF(f); - f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data, innerwt_signatures, 2, - 3, 1, PyUFunc_None, "innerwt", - "inner1d with a weight argument \n"\ - " \"(i),(i),(i)->()\" \n", - 0, innerwt_signature); + f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data, + innerwt_signatures, 2, 3, 1, PyUFunc_None, "innerwt", + "inner1d with a weight argument \n" + " \"(i),(i),(i)->()\" \n", + 0, innerwt_signature); PyDict_SetItemString(dictionary, "innerwt", f); Py_DECREF(f); f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions, - matrix_multiply_data, matrix_multiply_signatures, - 3, 2, 1, PyUFunc_None, "matrix_multiply", - "matrix multiplication on last two dimensions \n"\ - " \"(m,n),(n,p)->(m,p)\" \n", - 0, matrix_multiply_signature); + matrix_multiply_data, matrix_multiply_signatures, + 3, 2, 1, PyUFunc_None, "matrix_multiply", + "matrix multiplication on last two dimensions \n" 
+ " \"(m,n),(n,p)->(m,p)\" \n", + 0, matrix_multiply_signature); PyDict_SetItemString(dictionary, "matrix_multiply", f); Py_DECREF(f); + f = PyUFunc_FromFuncAndDataAndSignature(euclidean_pdist_functions, + eucldiean_pdist_data, euclidean_pdist_signatures, + 2, 1, 1, PyUFunc_None, "euclidean_pdist", + "pairwise euclidean distance on last two dimensions \n" + " \"(n,d)->(p)\" \n", + 0, euclidean_pdist_signature); + PyDict_SetItemString(dictionary, "euclidean_pdist", f); + Py_DECREF(f); } -/* - End of auto-generated code. -*/ - - static PyObject * UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args) diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 3ed7ee771..57b2bb239 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -54,16 +54,15 @@ object_ufunc_type_resolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int i, nop = ufunc->nin + ufunc->nout; - PyArray_Descr *obj_dtype; - obj_dtype = PyArray_DescrFromType(NPY_OBJECT); - if (obj_dtype == NULL) { + out_dtypes[0] = PyArray_DescrFromType(NPY_OBJECT); + if (out_dtypes[0] == NULL) { return -1; } - for (i = 0; i < nop; ++i) { - Py_INCREF(obj_dtype); - out_dtypes[i] = obj_dtype; + for (i = 1; i < nop; ++i) { + Py_INCREF(out_dtypes[0]); + out_dtypes[i] = out_dtypes[0]; } return 0; @@ -202,182 +201,6 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS ***************************************************************************** */ -/* Less automated additions to the ufuncs */ - -static PyUFuncGenericFunction frexp_functions[] = { -#ifdef HAVE_FREXPF - HALF_frexp, - FLOAT_frexp, -#endif - DOUBLE_frexp -#ifdef HAVE_FREXPL - ,LONGDOUBLE_frexp -#endif -}; - -static void * blank3_data[] = { (void *)NULL, (void *)NULL, (void *)NULL}; -static void * blank6_data[] = { (void *)NULL, (void *)NULL, (void *)NULL, - (void *)NULL, (void *)NULL, (void *)NULL}; -static char frexp_signatures[] = { -#ifdef HAVE_FREXPF - NPY_HALF, NPY_HALF, NPY_INT, - NPY_FLOAT, NPY_FLOAT, NPY_INT, -#endif - NPY_DOUBLE, NPY_DOUBLE, NPY_INT -#ifdef HAVE_FREXPL - ,NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_INT -#endif -}; - -#if NPY_SIZEOF_LONG == NPY_SIZEOF_INT -#define LDEXP_LONG(typ) typ##_ldexp -#else -#define LDEXP_LONG(typ) typ##_ldexp_long -#endif - -static PyUFuncGenericFunction ldexp_functions[] = { -#ifdef HAVE_LDEXPF - HALF_ldexp, - FLOAT_ldexp, - LDEXP_LONG(HALF), - LDEXP_LONG(FLOAT), -#endif - DOUBLE_ldexp, - LDEXP_LONG(DOUBLE) -#ifdef HAVE_LDEXPL - , - LONGDOUBLE_ldexp, - LDEXP_LONG(LONGDOUBLE) -#endif -}; - -static const char frdoc[] = - " Decompose the elements of x into mantissa and twos exponent.\n" - "\n" - " Returns (`mantissa`, `exponent`), where `x = mantissa * 2**exponent``.\n" - " The mantissa is lies in the open interval(-1, 1), while the twos\n" - " exponent is a signed integer.\n" - "\n" - " Parameters\n" - " ----------\n" - " x : array_like\n" - " Array of numbers to be decomposed.\n" - " out1: ndarray, optional\n" - " Output array for the mantissa. Must have the same shape as `x`.\n" - " out2: ndarray, optional\n" - " Output array for the exponent. 
Must have the same shape as `x`.\n" - "\n" - " Returns\n" - " -------\n" - " (mantissa, exponent) : tuple of ndarrays, (float, int)\n" - " `mantissa` is a float array with values between -1 and 1.\n" - " `exponent` is an int array which represents the exponent of 2.\n" - "\n" - " See Also\n" - " --------\n" - " ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.\n" - "\n" - " Notes\n" - " -----\n" - " Complex dtypes are not supported, they will raise a TypeError.\n" - "\n" - " Examples\n" - " --------\n" - " >>> x = np.arange(9)\n" - " >>> y1, y2 = np.frexp(x)\n" - " >>> y1\n" - " array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,\n" - " 0.5 ])\n" - " >>> y2\n" - " array([0, 1, 2, 2, 3, 3, 3, 3, 4])\n" - " >>> y1 * 2**y2\n" - " array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])\n" - "\n"; - - -static char ldexp_signatures[] = { -#ifdef HAVE_LDEXPF - NPY_HALF, NPY_INT, NPY_HALF, - NPY_FLOAT, NPY_INT, NPY_FLOAT, - NPY_HALF, NPY_LONG, NPY_HALF, - NPY_FLOAT, NPY_LONG, NPY_FLOAT, -#endif - NPY_DOUBLE, NPY_INT, NPY_DOUBLE, - NPY_DOUBLE, NPY_LONG, NPY_DOUBLE -#ifdef HAVE_LDEXPL - ,NPY_LONGDOUBLE, NPY_INT, NPY_LONGDOUBLE - ,NPY_LONGDOUBLE, NPY_LONG, NPY_LONGDOUBLE -#endif -}; - -static const char lddoc[] = - " Returns x1 * 2**x2, element-wise.\n" - "\n" - " The mantissas `x1` and twos exponents `x2` are used to construct\n" - " floating point numbers ``x1 * 2**x2``.\n" - "\n" - " Parameters\n" - " ----------\n" - " x1 : array_like\n" - " Array of multipliers.\n" - " x2 : array_like, int\n" - " Array of twos exponents.\n" - " out : ndarray, optional\n" - " Output array for the result.\n" - "\n" - " Returns\n" - " -------\n" - " y : ndarray or scalar\n" - " The result of ``x1 * 2**x2``.\n" - "\n" - " See Also\n" - " --------\n" - " frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.\n" - "\n" - " Notes\n" - " -----\n" - " Complex dtypes are not supported, they will raise a TypeError.\n" - "\n" - " `ldexp` is useful as the inverse of `frexp`, if used by itself it is\n" - " more clear to simply use the expression ``x1 * 2**x2``.\n" - "\n" - " Examples\n" - " --------\n" - " >>> np.ldexp(5, np.arange(4))\n" - " array([ 5., 10., 20., 40.], dtype=float32)\n" - "\n" - " >>> x = np.arange(6)\n" - " >>> np.ldexp(*np.frexp(x))\n" - " array([ 0., 1., 2., 3., 4., 5.])\n" - "\n"; - - -static void -InitOtherOperators(PyObject *dictionary) { - PyObject *f; - int num; - - num = sizeof(frexp_functions) / sizeof(frexp_functions[0]); - f = PyUFunc_FromFuncAndData(frexp_functions, blank3_data, - frexp_signatures, num, - 1, 2, PyUFunc_None, "frexp", frdoc, 0); - PyDict_SetItemString(dictionary, "frexp", f); - Py_DECREF(f); - - num = sizeof(ldexp_functions) / sizeof(ldexp_functions[0]); - f = PyUFunc_FromFuncAndData(ldexp_functions, blank6_data, - ldexp_signatures, num, - 2, 1, PyUFunc_None, "ldexp", lddoc, 0); - PyDict_SetItemString(dictionary, "ldexp", f); - Py_DECREF(f); - -#if defined(NPY_PY3K) - f = PyDict_GetItemString(dictionary, "true_divide"); - PyDict_SetItemString(dictionary, "divide", f); -#endif - return; -} - NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_out = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_subok = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_array_prepare = NULL; @@ -493,8 +316,6 @@ PyMODINIT_FUNC initumath(void) /* Load the ufunc operators into the array module's namespace */ InitOperators(d); - InitOtherOperators(d); - PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI)); Py_DECREF(s); PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(NPY_E)); 
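
With npy_frexp/npy_ldexp now provided unconditionally by npy_math, frexp and ldexp are generated like every other ufunc, and this hand-maintained registration (together with its copy of the docstrings) can be dropped. A small usage sketch of the round-trip identity those docstrings describe:

    import numpy as np

    x = np.arange(9.0)
    m, e = np.frexp(x)          # decompose: x == m * 2**e elementwise
    assert np.allclose(np.ldexp(m, e), x)

    # float16 takes the same code path unconditionally now
    h = np.array([0.1, 1.5, 1000.0], dtype=np.float16)
    mh, eh = np.frexp(h)
    assert np.allclose(np.ldexp(mh, eh), h)
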
@@ -537,6 +358,11 @@ PyMODINIT_FUNC initumath(void) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); +#if defined(NPY_PY3K) + s = PyDict_GetItemString(d, "true_divide"); + PyDict_SetItemString(d, "divide", s); +#endif + s = PyDict_GetItemString(d, "conjugate"); s2 = PyDict_GetItemString(d, "remainder"); /* Setup the array object's numerical structures with appropriate diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py index 54edd7e90..2430866fd 100644 --- a/numpy/core/tests/test_abc.py +++ b/numpy/core/tests/test_abc.py @@ -1,7 +1,6 @@ from __future__ import division, absolute_import, print_function -import numpy as np -from numpy.testing import TestCase, assert_ +from numpy.testing import TestCase, assert_, run_module_suite import numbers from numpy.core.numerictypes import sctypes @@ -43,3 +42,6 @@ class ABC(TestCase): assert_(issubclass(t, numbers.Integral), "{0} is not subclass of Integral".format(t.__name__)) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py index 2fd6463c7..ebb8e4b1b 100644 --- a/numpy/core/tests/test_api.py +++ b/numpy/core/tests/test_api.py @@ -377,7 +377,7 @@ def test_copyto_permut(): r = np.zeros(power) mask = np.array(l) imask = np.array(l).view(np.uint8) - imask[mask != 0] = 0xFF + imask[mask != 0] = c np.copyto(r, d, where=mask) assert_array_equal(r == 1, l) assert_equal(r.sum(), sum(l)) diff --git a/numpy/core/tests/test_blasdot.py b/numpy/core/tests/test_blasdot.py deleted file mode 100644 index caa576abc..000000000 --- a/numpy/core/tests/test_blasdot.py +++ /dev/null @@ -1,171 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import sys -from numpy.core import zeros, float64 -from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \ - assert_raises, assert_array_equal, assert_allclose, assert_equal -from numpy.core.multiarray import inner as inner_ - -DECPREC = 14 - -class TestInner(TestCase): - def test_vecself(self): - """Ticket 844.""" - # Inner product of a vector with itself segfaults or give meaningless - # result - a = zeros(shape = (1, 80), dtype = float64) - p = inner_(a, a) - assert_almost_equal(p, 0, decimal = DECPREC) - -try: - import numpy.core._dotblas as _dotblas -except ImportError: - _dotblas = None - -@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") -def test_blasdot_used(): - from numpy.core import dot, vdot, inner, alterdot, restoredot - assert_(dot is _dotblas.dot) - assert_(vdot is _dotblas.vdot) - assert_(inner is _dotblas.inner) - assert_(alterdot is _dotblas.alterdot) - assert_(restoredot is _dotblas.restoredot) - - -def test_dot_2args(): - from numpy.core import dot - - a = np.array([[1, 2], [3, 4]], dtype=float) - b = np.array([[1, 0], [1, 1]], dtype=float) - c = np.array([[3, 2], [7, 4]], dtype=float) - - d = dot(a, b) - assert_allclose(c, d) - -def test_dot_3args(): - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 32)) - for i in range(12): - np.dot(f, v, r) - assert_equal(sys.getrefcount(r), 2) - r2 = np.dot(f, v, out=None) - assert_array_equal(r2, r) - assert_(r is np.dot(f, v, out=r)) - - v = v[:, 0].copy() # v.shape == (16,) - r = r[:, 0].copy() # r.shape == (1024,) - r2 = np.dot(f, v) - assert_(r is np.dot(f, v, r)) - assert_array_equal(r2, r) - -def test_dot_3args_errors(): - np.random.seed(22) - f = 
np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 31)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((1024,)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((32,)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((32, 1024)) - assert_raises(ValueError, np.dot, f, v, r) - assert_raises(ValueError, np.dot, f, v, r.T) - - r = np.empty((1024, 64)) - assert_raises(ValueError, np.dot, f, v, r[:, ::2]) - assert_raises(ValueError, np.dot, f, v, r[:, :32]) - - r = np.empty((1024, 32), dtype=np.float32) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((1024, 32), dtype=int) - assert_raises(ValueError, np.dot, f, v, r) - -def test_dot_array_order(): - """ Test numpy dot with different order C, F - - Comparing results with multiarray dot. - Double and single precisions array are compared using relative - precision of 7 and 5 decimals respectively. - Use 30 decimal when comparing exact operations like: - (a.b)' = b'.a' - """ - _dot = np.core.multiarray.dot - a_dim, b_dim, c_dim = 10, 4, 7 - orders = ["C", "F"] - dtypes_prec = {np.float64: 7, np.float32: 5} - np.random.seed(7) - - for arr_type, prec in dtypes_prec.items(): - for a_order in orders: - a = np.asarray(np.random.randn(a_dim, a_dim), - dtype=arr_type, order=a_order) - assert_array_equal(np.dot(a, a), a.dot(a)) - # (a.a)' = a'.a', note that mse~=1e-31 needs almost_equal - assert_almost_equal(a.dot(a), a.T.dot(a.T).T, decimal=prec) - - # - # Check with making explicit copy - # - a_T = a.T.copy(order=a_order) - assert_almost_equal(a_T.dot(a_T), a.T.dot(a.T), decimal=prec) - assert_almost_equal(a.dot(a_T), a.dot(a.T), decimal=prec) - assert_almost_equal(a_T.dot(a), a.T.dot(a), decimal=prec) - - # - # Compare with multiarray dot - # - assert_almost_equal(a.dot(a), _dot(a, a), decimal=prec) - assert_almost_equal(a.T.dot(a), _dot(a.T, a), decimal=prec) - assert_almost_equal(a.dot(a.T), _dot(a, a.T), decimal=prec) - assert_almost_equal(a.T.dot(a.T), _dot(a.T, a.T), decimal=prec) - for res in a.dot(a), a.T.dot(a), a.dot(a.T), a.T.dot(a.T): - assert res.flags.c_contiguous - - for b_order in orders: - b = np.asarray(np.random.randn(a_dim, b_dim), - dtype=arr_type, order=b_order) - b_T = b.T.copy(order=b_order) - assert_almost_equal(a_T.dot(b), a.T.dot(b), decimal=prec) - assert_almost_equal(b_T.dot(a), b.T.dot(a), decimal=prec) - # (b'.a)' = a'.b - assert_almost_equal(b.T.dot(a), a.T.dot(b).T, decimal=prec) - assert_almost_equal(a.dot(b), _dot(a, b), decimal=prec) - assert_almost_equal(b.T.dot(a), _dot(b.T, a), decimal=prec) - - - for c_order in orders: - c = np.asarray(np.random.randn(b_dim, c_dim), - dtype=arr_type, order=c_order) - c_T = c.T.copy(order=c_order) - assert_almost_equal(c.T.dot(b.T), c_T.dot(b_T), decimal=prec) - assert_almost_equal(c.T.dot(b.T).T, b.dot(c), decimal=prec) - assert_almost_equal(b.dot(c), _dot(b, c), decimal=prec) - assert_almost_equal(c.T.dot(b.T), _dot(c.T, b.T), decimal=prec) - -def test_dot_override(): - class A(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return "A" - - class B(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return NotImplemented - - a = A() - b = B() - c = np.array([[1]]) - - assert_equal(np.dot(a, b), "A") - assert_equal(c.dot(a), "A") - assert_raises(TypeError, np.dot, b, c) - assert_raises(TypeError, c.dot, b) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index a1f4664a5..9e2248205 
100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -5,14 +5,12 @@ to document how deprecations should eventually be turned into errors. """ from __future__ import division, absolute_import, print_function -import sys import operator import warnings -from nose.plugins.skip import SkipTest import numpy as np -from numpy.testing import (dec, run_module_suite, assert_raises, - assert_warns, assert_array_equal) +from numpy.testing import (run_module_suite, assert_raises, + assert_warns, assert_array_equal, assert_) class _DeprecationTestCase(object): @@ -34,11 +32,9 @@ class _DeprecationTestCase(object): warnings.filterwarnings("always", message=self.message, category=DeprecationWarning) - def tearDown(self): self.warn_ctx.__exit__() - def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, exceptions=(DeprecationWarning,), args=(), kwargs={}): @@ -102,7 +98,6 @@ class _DeprecationTestCase(object): if exceptions == tuple(): raise AssertionError("Error raised during function call") - def assert_not_deprecated(self, function, args=(), kwargs={}): """Test if DeprecationWarnings are given and raised. @@ -143,6 +138,7 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): def test_indexing(self): a = np.array([[[5]]]) + def assert_deprecated(*args, **kwargs): self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs) @@ -172,7 +168,6 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): assert_deprecated(lambda: a[0.0:, 0.0], num=2) assert_deprecated(lambda: a[0.0:, 0.0,:], num=2) - def test_valid_indexing(self): a = np.array([[[5]]]) assert_not_deprecated = self.assert_not_deprecated @@ -183,9 +178,9 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): assert_not_deprecated(lambda: a[:, 0,:]) assert_not_deprecated(lambda: a[:,:,:]) - def test_slicing(self): a = np.array([[5]]) + def assert_deprecated(*args, **kwargs): self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs) @@ -217,7 +212,6 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): # should still get the DeprecationWarning if step = 0. 
assert_deprecated(lambda: a[::0.0], function_fails=True) - def test_valid_slicing(self): a = np.array([[[5]]]) assert_not_deprecated = self.assert_not_deprecated @@ -231,7 +225,6 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): assert_not_deprecated(lambda: a[:2:2]) assert_not_deprecated(lambda: a[1:2:2]) - def test_non_integer_argument_deprecations(self): a = np.array([[5]]) @@ -240,7 +233,6 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): self.assert_deprecated(np.take, args=(a, [0], 1.)) self.assert_deprecated(np.take, args=(a, [0], np.float64(1.))) - def test_non_integer_sequence_multiplication(self): # Numpy scalar sequence multiply should not work with non-integers def mult(a, b): @@ -248,6 +240,13 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): self.assert_deprecated(mult, args=([1], np.float_(3))) self.assert_not_deprecated(mult, args=([1], np.int_(3))) + def test_reduce_axis_float_index(self): + d = np.zeros((3,3,3)) + self.assert_deprecated(np.min, args=(d, 0.5)) + self.assert_deprecated(np.min, num=1, args=(d, (0.5, 1))) + self.assert_deprecated(np.min, num=1, args=(d, (1, 2.2))) + self.assert_deprecated(np.min, num=2, args=(d, (.2, 1.2))) + class TestBooleanArgumentDeprecation(_DeprecationTestCase): """This tests that using a boolean as integer argument/indexing is @@ -295,7 +294,6 @@ class TestArrayToIndexDeprecation(_DeprecationTestCase): # Check slicing. Normal indexing checks arrays specifically. self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3) - class TestNonIntegerArrayLike(_DeprecationTestCase): """Tests that array likes, i.e. lists give a deprecation warning when they cannot be safely cast to an integer. @@ -312,7 +310,6 @@ class TestNonIntegerArrayLike(_DeprecationTestCase): self.assert_not_deprecated(a.__getitem__, ([],)) - def test_boolean_futurewarning(self): a = np.arange(10) with warnings.catch_warnings(): @@ -370,12 +367,13 @@ class TestRankDeprecation(_DeprecationTestCase): """Test that np.rank is deprecated. The function should simply be removed. The VisibleDeprecationWarning may become unnecessary. """ + def test(self): a = np.arange(10) assert_warns(np.VisibleDeprecationWarning, np.rank, a) -class TestComparisonDepreactions(_DeprecationTestCase): +class TestComparisonDeprecations(_DeprecationTestCase): """This tests the deprecation, for non-elementwise comparison logic. This used to mean that when an error occured during element-wise comparison (i.e. broadcasting) NotImplemented was returned, but also in the comparison @@ -400,7 +398,6 @@ class TestComparisonDepreactions(_DeprecationTestCase): b = np.array([1, np.array([1,2,3])], dtype=object) self.assert_deprecated(op, args=(a, b), num=None) - def test_string(self): # For two string arrays, strings always raised the broadcasting error: a = np.array(['a', 'b']) @@ -412,7 +409,6 @@ class TestComparisonDepreactions(_DeprecationTestCase): # following works (and returns False) due to dtype mismatch: a == [] - def test_none_comparison(self): # Test comparison of None, which should result in elementwise # comparison in the future. [1, 2] == None should be [False, False]. @@ -426,15 +422,35 @@ class TestComparisonDepreactions(_DeprecationTestCase): assert_raises(FutureWarning, operator.eq, np.arange(3), None) assert_raises(FutureWarning, operator.ne, np.arange(3), None) + def test_scalar_none_comparison(self): + # Scalars should still just return False and not give a warning.
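+ # Illustrative doctest-style sketch of the expected scalar behavior + # (it merely restates the assertions below): + # >>> np.float32(1) == None # plain False, no FutureWarning + # False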
+ with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', FutureWarning) + assert_(not np.float32(1) == None) + assert_(not np.str_('test') == None) + # This is dubious (see below): + assert_(not np.datetime64('NaT') == None) + + assert_(np.float32(1) != None) + assert_(np.str_('test') != None) + # This is dubious (see below): + assert_(np.datetime64('NaT') != None) + assert_(len(w) == 0) + + # For documentation purposes, this is why the datetime is dubious. + # At the time of deprecation this was no behaviour change, but + # it has to be considered when the deprecation is done. + assert_(np.equal(np.datetime64('NaT'), None)) -class TestIdentityComparisonDepreactions(_DeprecationTestCase): + +class TestIdentityComparisonDeprecations(_DeprecationTestCase): """This tests the equal and not_equal object ufuncs identity check deprecation. This was due to the usage of PyObject_RichCompareBool. This tests that for example for `a = np.array([np.nan], dtype=object)` `a == a` it is warned that False and not `np.nan is np.nan` is returned. - Should be kept in sync with TestComparisonDepreactions and new tests + Should be kept in sync with TestComparisonDeprecations and new tests added when the deprecation is over. Requires only removing of @identity@ (and blocks) from the ufunc loops.c.src of the OBJECT comparisons. """ @@ -460,11 +476,11 @@ class TestIdentityComparisonDepreactions(_DeprecationTestCase): np.less_equal(a, a) np.greater_equal(a, a) - def test_comparison_error(self): class FunkyType(object): def __eq__(self, other): raise TypeError("I won't compare") + def __ne__(self, other): raise TypeError("I won't compare") @@ -472,7 +488,6 @@ class TestIdentityComparisonDepreactions(_DeprecationTestCase): self.assert_deprecated(np.equal, args=(a, a)) self.assert_deprecated(np.not_equal, args=(a, a)) - def test_bool_error(self): # The comparison result cannot be interpreted as a bool a = np.array([np.array([1, 2, 3]), None], dtype=object) @@ -480,5 +495,18 @@ class TestIdentityComparisonDepreactions(_DeprecationTestCase): self.assert_deprecated(np.not_equal, args=(a, a)) +class TestAlterdotRestoredotDeprecations(_DeprecationTestCase): + """The alterdot/restoredot functions are deprecated. + + These functions no longer do anything in numpy 1.10, so should not be + used. + + """ + + def test_alterdot_restoredot_deprecation(self): + self.assert_deprecated(np.alterdot) + self.assert_deprecated(np.restoredot) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 18660351c..2621c8696 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -508,13 +508,8 @@ class TestDtypeAttributeDeletion(object): "isbuiltin", "isnative", "isalignedstruct", "fields", "metadata", "hasobject"] - if sys.version[:3] == '2.4': error = TypeError - else: - error = AttributeError - for s in attr: - assert_raises(error, delattr, dt, s) + assert_raises(AttributeError, delattr, dt, s) def test_dtype_writable_attributes_deletion(self): diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py index f6ffd5a10..e8f9fb755 100644 --- a/numpy/core/tests/test_function_base.py +++ b/numpy/core/tests/test_function_base.py @@ -108,4 +108,8 @@ class TestLinspace(TestCase): a = PhysicalQuantity(0.0) b = PhysicalQuantity(1.0) - assert_equal(linspace(a, b), linspace(0.0, 1.0))
\ No newline at end of file + assert_equal(linspace(a, b), linspace(0.0, 1.0)) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index 6b0b0a0b5..e59bb257b 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -147,7 +147,7 @@ class TestIndexing(TestCase): def test_boolean_assignment_value_mismatch(self): # A boolean assignment should fail when the shape of the values - # cannot be broadcasted to the subscription. (see also gh-3458) + # cannot be broadcast to the subscription. (see also gh-3458) a = np.arange(4) def f(a, v): a[a > -1] = v @@ -188,12 +188,12 @@ class TestIndexing(TestCase): # If the strides are not reversed, the 0 in the arange comes last. assert_equal(a[0], 0) - # This also tests that the subspace buffer is initiliazed: + # This also tests that the subspace buffer is initialized: a = np.ones((5, 2)) c = np.arange(10).reshape(5, 2)[::-1] a[b, :] = c assert_equal(a[0], [0, 1]) - + def test_reversed_strides_result_allocation(self): # Test a bug when calculating the output strides for a result array # when the subspace size was 1 (and test other cases as well) @@ -285,6 +285,17 @@ class TestIndexing(TestCase): assert_((a == 1).all()) + def test_subclass_writeable(self): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + ind = np.array([False, True, True], dtype=bool) + assert_(d[ind].flags.writeable) + ind = np.array([0, 1]) + assert_(d[ind].flags.writeable) + assert_(d[...].flags.writeable) + assert_(d[0].flags.writeable) + + def test_memory_order(self): # This is not necessary to preserve. Memory layouts for # more complex indices are not as simple. @@ -335,7 +346,7 @@ class TestIndexing(TestCase): # Reference count of intp for index checks a = np.array([0]) refcount = sys.getrefcount(np.dtype(np.intp)) - # item setting always checks indices in seperate function: + # item setting always checks indices in separate function: a[np.array([0], dtype=np.intp)] = 1 a[np.array([0], dtype=np.uint8)] = 1 assert_raises(IndexError, a.__setitem__, @@ -367,6 +378,37 @@ class TestIndexing(TestCase): d[b % 2 == 0] d[b % 2 == 0] = x[::2] + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike(object): + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + class TestFieldIndexing(TestCase): def test_scalar_return_type(self): @@ -402,8 +444,14 @@ class TestBroadcastedAssignments(TestCase): # Too large and not only ones. 
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) + + with warnings.catch_warnings(): + # Will be a ValueError as well. + warnings.simplefilter("error", DeprecationWarning) + assert_raises(DeprecationWarning, assign, a, s_[[1, 2, 3],], + np.ones((2, 1))) + assert_raises(DeprecationWarning, assign, a, s_[[[1], [2]],], + np.ones((2,2,1))) def test_simple_broadcasting_errors(self): @@ -520,11 +568,11 @@ class TestMultiIndexingAutomated(TestCase): These test use code to mimic the C-Code indexing for selection. NOTE: * This still lacks tests for complex item setting. - * If you change behavoir of indexing, you might want to modify + * If you change behavior of indexing, you might want to modify these tests to try more combinations. * Behavior was written to match numpy version 1.8. (though a first version matched 1.7.) - * Only tuple indicies are supported by the mimicing code. + * Only tuple indices are supported by the mimicking code. (and tested as of writing this) * Error types should match most of the time as long as there is only one error. For multiple errors, what gets raised @@ -547,7 +595,7 @@ class TestMultiIndexingAutomated(TestCase): slice(4, -1, -2), slice(None, None, -3), # Some Fancy indexes: - np.empty((0, 1, 1), dtype=np.intp), # empty broadcastable + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast np.array([0, 1, -2]), np.array([[2], [0], [1]]), np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), @@ -594,7 +642,7 @@ class TestMultiIndexingAutomated(TestCase): fancy_dim = 0 # NOTE: This is a funny twist (and probably OK to change). # The boolean array has illegal indexes, but this is - # allowed if the broadcasted fancy-indices are 0-sized. + # allowed if the broadcast fancy-indices are 0-sized. # This variable is to catch that case. error_unless_broadcast_to_empty = False @@ -639,7 +687,7 @@ class TestMultiIndexingAutomated(TestCase): if arr.ndim - ndim < 0: # we can't take more dimensions then we have, not even for 0-d arrays. # since a[()] makes sense, but not a[(),]. We will raise an error - # lateron, unless a broadcasting error occurs first. + # later on, unless a broadcasting error occurs first. raise IndexError if ndim == 0 and not None in in_indices: @@ -651,7 +699,7 @@ class TestMultiIndexingAutomated(TestCase): for ax, indx in enumerate(in_indices): if isinstance(indx, slice): - # convert to an index array anways: + # convert to an index array indx = np.arange(*indx.indices(arr.shape[ax])) indices.append(['s', indx]) continue @@ -684,7 +732,7 @@ class TestMultiIndexingAutomated(TestCase): indx = flat_indx else: # This could be changed, a 0-d boolean index can - # make sense (even outide the 0-d indexed array case) + # make sense (even outside the 0-d indexed array case) # Note that originally this is could be interpreted as # integer in the full integer special case. raise IndexError @@ -736,7 +784,7 @@ class TestMultiIndexingAutomated(TestCase): arr = arr.transpose(*(fancy_axes + axes)) # We only have one 'f' index now and arr is transposed accordingly. - # Now handle newaxes by reshaping... + # Now handle newaxis by reshaping... ax = 0 for indx in indices: if indx[0] == 'f': @@ -754,7 +802,7 @@ class TestMultiIndexingAutomated(TestCase): res = np.broadcast(*indx[1:]) # raises ValueError... else: res = indx[1] - # unfortunatly the indices might be out of bounds. 
So check + # unfortunately the indices might be out of bounds. So check # that first, and use mode='wrap' then. However only if # there are any indices... if res.size != 0: @@ -892,7 +940,7 @@ class TestMultiIndexingAutomated(TestCase): # spot and the simple ones in one other spot. with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer - # index, when running the file seperatly. + # index, when running the file separately. warnings.filterwarnings('error', '', DeprecationWarning) for simple_pos in [0, 2, 3]: tocheck = [self.fill_indices, self.complex_indices, diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index b92c45a1f..1e47a2297 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -246,6 +246,43 @@ class TestArrayConstruction(TestCase): r = np.array([[True, False], [True, False], [False, True]]) assert_equal(r, tgt.T) + def test_array_empty(self): + assert_raises(TypeError, np.array) + + def test_array_copy_false(self): + d = np.array([1, 2, 3]) + e = np.array(d, copy=False) + d[1] = 3 + assert_array_equal(e, [1, 3, 3]) + e = np.array(d, copy=False, order='F') + d[1] = 4 + assert_array_equal(e, [1, 4, 3]) + e[2] = 7 + assert_array_equal(d, [1, 4, 7]) + + def test_array_copy_true(self): + d = np.array([[1,2,3], [1, 2, 3]]) + e = np.array(d, copy=True) + d[0, 1] = 3 + e[0, 2] = -7 + assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) + assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) + e = np.array(d, copy=True, order='F') + d[0, 1] = 5 + e[0, 2] = 7 + assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) + assert_array_equal(d, [[1, 5, 3], [1,2,3]]) + + def test_array_cont(self): + d = np.ones(10)[::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.ascontiguousarray(d).flags.f_contiguous) + assert_(np.asfortranarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + d = np.ones((10, 10))[::2,::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + class TestAssignment(TestCase): def test_assignment_broadcasting(self): @@ -549,6 +586,12 @@ class TestCreation(TestCase): assert_array_equal(zeros_like(d), d) assert_equal(zeros_like(d).dtype, d.dtype) + def test_empty_unicode(self): + # don't throw decode errors on garbage memory + for i in range(5, 100, 5): + d = np.empty(i, dtype='U') + str(d) + def test_sequence_non_homogenous(self): assert_equal(np.array([4, 2**80]).dtype, np.object) assert_equal(np.array([4, 2**80, 4]).dtype, np.object) @@ -593,6 +636,20 @@ class TestCreation(TestCase): assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) + def test_no_len_object_type(self): + # gh-5100, want object array from iterable object without len() + class Point2: + def __init__(self): + pass + + def __getitem__(self, ind): + if ind in [0, 1]: + return ind + else: + raise IndexError() + d = np.array([Point2(), Point2(), Point2()]) + assert_equal(d.dtype, np.dtype(object)) + class TestStructured(TestCase): def test_subarray_field_access(self): @@ -1394,6 +1451,12 @@ class TestMethods(TestCase): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) + d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 9]) + kth = [0, 3, 19, 20] + assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7)) + assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7)) + d = np.array([2, 1]) d.partition(0, kind=k) assert_raises(ValueError, 
d.partition, 2) @@ -1589,6 +1652,18 @@ class TestMethods(TestCase): assert_raises(ValueError, d.partition, 2, kind=k) assert_raises(ValueError, d.argpartition, 2, kind=k) + def test_partition_fuzz(self): + # a few rounds of random data testing + for j in range(10, 30): + for i in range(1, j - 2): + d = np.arange(j) + np.random.shuffle(d) + d = d % np.random.randint(2, 30) + idx = np.random.randint(d.size) + kth = [0, idx, i, i + 1] + tgt = np.sort(d)[kth] + assert_array_equal(np.partition(d, kth)[kth], tgt, + err_msg="data: %r\n kth: %r" % (d, kth)) def test_flatten(self): x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) @@ -1682,6 +1757,40 @@ class TestMethods(TestCase): a.diagonal() assert_(sys.getrefcount(a) < 50) + def test_put(self): + icodes = np.typecodes['AllInteger'] + fcodes = np.typecodes['AllFloat'] + for dt in icodes + fcodes + 'O': + tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt) + + # test 1-d + a = np.zeros(6, dtype=dt) + a.put([1, 3, 5], [1, 3, 5]) + assert_equal(a, tgt) + + # test 2-d + a = np.zeros((2, 3), dtype=dt) + a.put([1, 3, 5], [1, 3, 5]) + assert_equal(a, tgt.reshape(2, 3)) + + for dt in '?': + tgt = np.array([False, True, False, True, False, True], dtype=dt) + + # test 1-d + a = np.zeros(6, dtype=dt) + a.put([1, 3, 5], [True]*3) + assert_equal(a, tgt) + + # test 2-d + a = np.zeros((2, 3), dtype=dt) + a.put([1, 3, 5], [True]*3) + assert_equal(a, tgt.reshape(2, 3)) + + # check must be writeable + a = np.zeros(6) + a.flags.writeable = False + assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5]) + def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) @@ -1704,7 +1813,7 @@ class TestMethods(TestCase): assert_equal(a.ravel(), a.reshape(-1)) assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) - a = np.array([[0, 1], [2, 3]])[::-1,:] + a = np.array([[0, 1], [2, 3]])[::-1, :] assert_equal(a.ravel(), [2, 3, 0, 1]) assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) @@ -1713,8 +1822,169 @@ class TestMethods(TestCase): assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) + # Not contiguous and 1-sized axis with non matching stride + a = np.arange(2**3 * 2)[::2] + a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) + strides = list(a.strides) + strides[1] = 123 + a.strides = strides + assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_equal(a.ravel('K'), np.arange(0, 15, 2)) + + # General case of possible ravel that is not contiguous but + # works and includes a 1-sized axis with non matching stride + a = a.swapaxes(-1, -2) # swap back to C-order + assert_(np.may_share_memory(a.ravel(order='C'), a)) + assert_(np.may_share_memory(a.ravel(order='K'), a)) + + a = a.T # swap all to Fortran order + assert_(np.may_share_memory(a.ravel(order='F'), a)) + assert_(np.may_share_memory(a.ravel(order='K'), a)) + + # Test negative strides: + a = np.arange(4)[::-1].reshape(2, 2) + assert_(np.may_share_memory(a.ravel(order='C'), a)) + assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_equal(a.ravel('C'), [3, 2, 1, 0]) + assert_equal(a.ravel('K'), [3, 2, 1, 0]) + + # Test keeporder with weirdly strided 1-sized dims (1-d first stride) + a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order + strides = list(a.strides) + strides[0] = -12 + strides[-1] = 0 + a.strides = strides + assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_equal(a.ravel('K'), a.ravel('C')) + + # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING): + a = 
np.array([[1]]) + a.strides = (123, 432) + # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing + # them up on purpose: + if np.ones(1).strides == (8,): + assert_(np.may_share_memory(a.ravel('K'), a)) + assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) + + for order in ('C', 'F', 'A', 'K'): + # 0-d corner case: + a = np.array(0) + assert_equal(a.ravel(order), [0]) + assert_(np.may_share_memory(a.ravel(order), a)) + + #Test that certain non-inplace ravels work right (mostly) for 'K': + b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2) + a = b[..., ::2] + assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28]) + assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28]) + assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28]) + assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28]) + + a = b[::2, ...] + assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14]) + assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14]) + assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14]) + assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14]) + + def test_conjugate(self): + a = np.array([1-1j, 1+1j, 23+23.0j]) + ac = a.conj() + assert_equal(a.real, ac.real) + assert_equal(a.imag, -ac.imag) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1-1j, 1+1j, 23+23.0j], 'F') + ac = a.conj() + assert_equal(a.real, ac.real) + assert_equal(a.imag, -ac.imag) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1, 2, 3]) + ac = a.conj() + assert_equal(a, ac) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1.0, 2.0, 3.0]) + ac = a.conj() + assert_equal(a, ac) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1-1j, 1+1j, 1, 2.0], object) + ac = a.conj() + assert_equal(ac, [k.conjugate() for k in a]) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1-1j, 1, 2.0, 'f'], object) + assert_raises(AttributeError, lambda: a.conj()) + assert_raises(AttributeError, lambda: a.conjugate()) + class TestBinop(object): + def test_inplace(self): + # test refcount 1 inplace conversion + assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]), + [0.5, 1.0]) + + d = np.array([0.5, 0.5])[::2] + assert_array_almost_equal(d * (d * np.array([1.0, 2.0])), + [0.25, 0.5]) + + a = np.array([0.5]) + b = np.array([0.5]) + c = a + b + c = a - b + c = a * b + c = a / b + assert_equal(a, b) + assert_almost_equal(c, 1.) + + c = a + b * 2. / b * a - a / b + assert_equal(a, b) + assert_equal(c, 0.5) + + # true divide + a = np.array([5]) + b = np.array([3]) + c = (a * a) / b + + assert_almost_equal(c, 25 / 3) + assert_equal(a, 5) + assert_equal(b, 3) + + def test_extension_incref_elide(self): + # test extension (e.g. 
cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy.core.multiarray_tests import incref_elide + d = np.ones(5) + orig, res = incref_elide(d) + # the returned original should not be changed by an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning whether the refcount == 1 object is on the python stack (to + # check that we are called directly from python) is flawed, as the object + # may still be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy.core.multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(5)] + res = incref_elide_l(l) + # the returned original should not be changed by an inplace operation + assert_array_equal(l[4], np.ones(5)) + assert_array_equal(res, l[4] + l[4]) + def test_ufunc_override_rop_precedence(self): # Check that __rmul__ and other right-hand operations have # precedence over __numpy_ufunc__ @@ -2154,6 +2424,11 @@ class TestArgmax(TestCase): a.argmax(-1, out=out) assert_equal(out, a.argmax(-1)) + def test_argmax_unicode(self): + d = np.zeros(6031, dtype='<U9') + d[5942] = "as" + assert_equal(d.argmax(), 5942) + class TestArgmin(TestCase): @@ -2259,6 +2534,11 @@ class TestArgmin(TestCase): a.argmin(-1, out=out) assert_equal(out, a.argmin(-1)) + def test_argmin_unicode(self): + d = np.ones(6031, dtype='<U9') + d[6001] = "0" + assert_equal(d.argmin(), 6001) + class TestMinMax(TestCase): def test_scalar(self): @@ -3207,6 +3487,44 @@ class TestStats(TestCase): res = dat.var(1) assert_(res.info == dat.info) +class TestVdot(TestCase): + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=np.bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = array([[1, 2], [3, 4]], order='C') + b = array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + class TestDot(TestCase): def test_dot_2args(self): from numpy.core.multiarray import dot @@ -3269,6 +3587,16 @@ class TestDot(TestCase): r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, dot, f, v, r) + def test_dot_array_order(self): + a = array([[1, 2], [3, 4]], order='C') + b = array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + def test_dot_scalar_and_matrix_of_objects(self): # Ticket #2469 arr = np.matrix([1, 2], dtype=object) @@ -3276,6 +3604,24 @@ class TestDot(TestCase): assert_equal(np.dot(arr, 3), desired) assert_equal(np.dot(3, arr), desired) + def test_dot_override(self): + 
class A(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return "A" + + class B(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return NotImplemented + + a = A() + b = B() + c = np.array([[1]]) + + assert_equal(np.dot(a, b), "A") + assert_equal(c.dot(a), "A") + assert_raises(TypeError, np.dot, b, c) + assert_raises(TypeError, c.dot, b) + class TestInner(TestCase): @@ -3286,6 +3632,14 @@ class TestInner(TestCase): assert_equal(np.inner(arr, 3), desired) assert_equal(np.inner(3, arr), desired) + def test_vecself(self): + # Ticket 844. + # Inner product of a vector with itself segfaults or gives a + # meaningless result + a = zeros(shape = (1, 80), dtype = float64) + p = inner(a, a) + assert_almost_equal(p, 0, decimal=14) + class TestSummarization(TestCase): def test_1d(self): @@ -3306,7 +3660,6 @@ class TestSummarization(TestCase): ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_(repr(A) == reprA) - class TestChoose(TestCase): def setUp(self): self.x = 2*ones((3,), dtype=int) @@ -3973,6 +4326,31 @@ class TestNewBufferProtocol(object): x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3) self._check_roundtrip(x3) + def test_relaxed_strides(self): + # Test that relaxed strides are converted to non-relaxed + c = np.ones((1, 10, 10), dtype='i8') + + # Check for NPY_RELAXED_STRIDES_CHECKING: + if np.ones((10, 1), order="C").flags.f_contiguous: + c.strides = (-1, 80, 8) + + assert memoryview(c).strides == (800, 80, 8) + + # Writing C-contiguous data to a BytesIO buffer should work + fd = io.BytesIO() + fd.write(c.data) + + fortran = c.T + assert memoryview(fortran).strides == (8, 80, 800) + + arr = np.ones((1, 10)) + if arr.flags.f_contiguous: + shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) + assert_(strides[0] == 8) + arr = np.ones((10, 1), order='F') + shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) + assert_(strides[-1] == 8) + class TestArrayAttributeDeletion(object): @@ -4424,5 +4802,51 @@ class TestWhere(TestCase): assert_equal(np.where(False, b, a), "abcd") +class TestSizeOf(TestCase): + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_multiarray_assignment.py b/numpy/core/tests/test_multiarray_assignment.py index 65a09086b..86e1b125e 100644 --- a/numpy/core/tests/test_multiarray_assignment.py +++ b/numpy/core/tests/test_multiarray_assignment.py @@ -1,7 +1,7 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase +from numpy.testing
import run_module_suite ndims = 2 size = 10 @@ -78,3 +78,7 @@ def test_overlapping_assignments(): dstidx = tuple([a[1] for a in ind]) yield _check_assignment, srcidx, dstidx + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 0055c038b..dbf2cfaae 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2481,13 +2481,8 @@ def test_iter_non_writable_attribute_deletion(): "iterationneedsapi", "has_multi_index", "has_index", "dtypes", "ndim", "nop", "itersize", "finished"] - if sys.version[:3] == '2.4': - error = TypeError - else: - error = AttributeError - for s in attr: - assert_raises(error, delattr, it, s) + assert_raises(AttributeError, delattr, it, s) def test_iter_writable_attribute_deletion(): diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 100875469..ea145ef81 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -5,6 +5,7 @@ import platform from decimal import Decimal import warnings import itertools +import platform import numpy as np from numpy.core import * @@ -931,6 +932,7 @@ class TestNonzero(TestCase): assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + assert_(not x['a'].T.flags.aligned) assert_equal(np.count_nonzero(x['a'].T), 4) assert_equal(np.count_nonzero(x['b'].T), 5) assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) @@ -1047,8 +1049,17 @@ class TestArrayComparisons(TestCase): def assert_array_strict_equal(x, y): assert_array_equal(x, y) - # Check flags - assert_(x.flags == y.flags) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.updateifcopy == y.flags.updateifcopy) # check endianness assert_(x.dtype.isnative == y.dtype.isnative) @@ -1905,16 +1916,30 @@ class TestLikeFuncs(TestCase): class _TestCorrelate(TestCase): def _setup(self, dt): self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] self.y = np.array([-1, -2, -3], dtype=dt) self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) - self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) def test_float(self): self._setup(np.float) z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z1_4) z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 
'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.zs) def test_object(self): self._setup(Decimal) @@ -1923,6 +1948,13 @@ class _TestCorrelate(TestCase): z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, self.z2) + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + class TestCorrelate(_TestCorrelate): old_behavior = True def _setup(self, dt): @@ -1931,6 +1963,7 @@ class TestCorrelate(_TestCorrelate): # as well _TestCorrelate._setup(self, dt) self.z2 = self.z1 + self.z2r = self.z1r @dec.deprecated() def test_complex(self): @@ -1961,6 +1994,19 @@ class TestCorrelateNew(_TestCorrelate): z = np.correlate(y, x, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, r_z) +class TestConvolve(TestCase): + def test_object(self): + d = [1.] * 100 + k = [1.] * 3 + assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.convolve(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + class TestArgwhere(object): def test_2D(self): x = np.arange(6).reshape((2, 3)) diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 8c9ce5c70..355e5480a 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -1,5 +1,6 @@ from __future__ import division, absolute_import, print_function +import sys from os import path import numpy as np from numpy.testing import * @@ -15,6 +16,14 @@ class TestFromrecords(TestCase): r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], names='col1,col2,col3') assert_equal(r[0].item(), (456, 'dbe', 1.2)) + assert_equal(r['col1'].dtype.kind, 'i') + if sys.version_info[0] >= 3: + assert_equal(r['col2'].dtype.kind, 'U') + assert_equal(r['col2'].dtype.itemsize, 12) + else: + assert_equal(r['col2'].dtype.kind, 'S') + assert_equal(r['col2'].dtype.itemsize, 3) + assert_equal(r['col3'].dtype.kind, 'f') def test_method_array(self): r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big') diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 9f40d7b54..c7eaad984 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -181,7 +181,7 @@ class TestRegression(TestCase): assert_(np.all(b[yb] > 0.5)) def test_endian_where(self,level=rlevel): - """GitHuB issue #369""" + """GitHub issue #369""" net = np.zeros(3, dtype='>f4') net[1] = 0.00458849 net[2] = 0.605202 @@ -290,7 +290,7 @@ class TestRegression(TestCase): def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel): """Fix in r2836""" - # Create discontiguous Fortran-ordered array + # Create non-contiguous Fortran ordered array x = np.array(np.random.rand(3, 3), order='F')[:, :2] assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes())) @@ -311,7 +311,7 @@ class TestRegression(TestCase): self.assertRaises(ValueError, bfb) def test_nonarray_assignment(self): - # See also Issue gh-2870, test for nonarray assignment + # See also Issue gh-2870, test for non-array assignment # and equivalent unsafe casted array assignment a = np.arange(10) b = np.ones(10, dtype=bool) @@ -398,6 +398,41 @@ class TestRegression(TestCase): assert_raises(KeyError, 
np.lexsort, BuggySequence()) + + def test_pickle_py2_bytes_encoding(self): + # Check that arrays and scalars pickled on Py2 can be + # unpickled on Py3 using encoding='bytes' + + test_data = [ + # (original, py2_pickle) + (np.unicode_('\u6f2c'), + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n" + "I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")), + + (np.array([9e123], dtype=np.float64), + asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n" + "p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n" + "p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n" + "I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")), + + (np.array([(9e123,)], dtype=[('name', float)]), + asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n" + "(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n" + "(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n" + "(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n" + "I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n" + "bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")), + ] + + if sys.version_info[:2] >= (3, 4): + # encoding='bytes' was added in Py3.4 + for original, data in test_data: + result = pickle.loads(data, encoding='bytes') + assert_equal(result, original) + + if isinstance(result, np.ndarray) and result.dtype.names: + for name in result.dtype.names: + assert_(isinstance(name, str)) def test_pickle_dtype(self,level=rlevel): """Ticket #251""" @@ -560,7 +595,7 @@ class TestRegression(TestCase): assert_(a.reshape(5, 1).strides[0] == 0) def test_reshape_zero_size(self, level=rlevel): - """Github Issue #2700, setting shape failed for 0-sized arrays""" + """GitHub Issue #2700, setting shape failed for 0-sized arrays""" a = np.ones((0, 2)) a.shape = (-1, 2) @@ -568,7 +603,7 @@ class TestRegression(TestCase): # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) def test_reshape_trailing_ones_strides(self): - # Github issue gh-2949, bad strides for trailing ones of new shape + # GitHub issue gh-2949, bad strides for trailing ones of new shape a = np.zeros(12, dtype=np.int32)[::2] # not contiguous strides_c = (16, 8, 8, 8) strides_f = (8, 24, 48, 48) @@ -756,8 +791,12 @@ class TestRegression(TestCase): s = np.ones(10, dtype=float) x = np.array((15,), dtype=float) def ia(x, s, v): x[(s>0)]=v - self.assertRaises(ValueError, ia, x, s, np.zeros(9, dtype=float)) - self.assertRaises(ValueError, ia, x, s, np.zeros(11, dtype=float)) + # After removing the deprecation, the following are IndexErrors. + # This might seem odd as compared to the ValueError below. This + # is due to the fact that the new code always uses "nonzero" logic + # and the boolean special case is not taken.
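+ # Illustrative sketch of the "nonzero" logic (names as in this test): + # the mask is applied as x[(s > 0).nonzero()] = v, and the 10 indices + # from s do not fit the 1-element x, hence: + # >>> x[(s > 0).nonzero()] = np.zeros(9, dtype=float) # IndexError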
+ self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float)) + self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float)) # Old special case (different code path): self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) @@ -844,7 +883,7 @@ class TestRegression(TestCase): cnt0_b = cnt(b) cnt0_c = cnt(c) - # -- 0d -> 1d broadcasted slice assignment + # -- 0d -> 1-d broadcast slice assignment arr = np.zeros(5, dtype=np.object_) @@ -861,7 +900,7 @@ class TestRegression(TestCase): del arr - # -- 1d -> 2d broadcasted slice assignment + # -- 1-d -> 2-d broadcast slice assignment arr = np.zeros((5, 2), dtype=np.object_) arr0 = np.zeros(2, dtype=np.object_) @@ -880,7 +919,7 @@ class TestRegression(TestCase): del arr, arr0 - # -- 2d copying + flattening + # -- 2-d copying + flattening arr = np.zeros((5, 2), dtype=np.object_) @@ -1025,8 +1064,8 @@ class TestRegression(TestCase): b = np.zeros((2, 1), dtype = np.single) try: a.compress([True, False], axis = 1, out = b) - raise AssertionError("compress with an out which cannot be " \ - "safely casted should not return "\ + raise AssertionError("compress with an out which cannot be " + "safely casted should not return " + "successfully") except TypeError: pass @@ -1794,6 +1833,67 @@ class TestRegression(TestCase): bytestring = "\x01 ".encode('ascii') assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" + "tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" + "I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" + "p13\ntp14\nb.") + if sys.version_info[0] >= 3: + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129], dtype='b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that the scalar unpickling hack in Py3 that supports + # encoding='latin1' works correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.unicode_('\u6bd2'), + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" + "tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.float64(9e123), + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" + "p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" + "bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1 + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" + "I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" + "tp8\nRp9\n."), + 'different'), + ] + if sys.version_info[0] >= 3: + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under a non-latin1 encoding (e.g. KOI8-R) can + # produce bad results, but should not segfault.
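+ # Illustrative sketch of the failure modes tabulated in koi8r_validity + # (the assertions below are the authoritative check): + # >>> pickle.loads(data, encoding='koi8-r') # wrong codec: either a + # different-but-valid scalar or a ValueError, never a segfault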
+ if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting in silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so decoding results + # in an encoding exception + assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') + else: + raise ValueError(koi8r_validity) + def test_structured_type_to_object(self): a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') a_obj = np.empty((2,), dtype=object) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index d823e963f..3ba3beff9 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -83,6 +83,18 @@ class TestBaseMath(TestCase): np.add(1, inp2, out=out) assert_almost_equal(out, exp1, err_msg=msg) + def test_lower_align(self): + # check data that is not aligned to element size + # i.e. doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_almost_equal(d + d, d * 2) + np.add(d, d, out=o) + np.add(np.ones_like(d), d, out=o) + np.add(d, np.ones_like(d), out=o) + np.add(np.ones_like(d), d) + np.add(d, np.ones_like(d)) + class TestPower(TestCase): def test_small_types(self): @@ -259,5 +271,18 @@ class TestRepr(object): for t in [np.float32, np.float64]: yield self._test_type_repr, t + +class TestSizeOf(TestCase): + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 080606dce..a285d5334 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -14,6 +14,10 @@ class TestUfunc(TestCase): import pickle assert pickle.loads(pickle.dumps(np.sin)) is np.sin + # Check that a ufunc not defined in the top level numpy namespace, such + # as numpy.core.test_rational.test_add, can also be pickled + assert pickle.loads(pickle.dumps(test_add)) is test_add + def test_pickle_withstring(self): import pickle astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n" @@ -384,21 +388,18 @@ class TestUfunc(TestCase): msg = "extend & broadcast loop dimensions" b = np.arange(4).reshape((2, 2)) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "broadcast in core dimensions" + # Broadcast in core dimensions should fail a = np.arange(8).reshape((4, 2)) b = np.arange(4).reshape((4, 1)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "extend & broadcast core and loop dimensions" + assert_raises(ValueError, umt.inner1d, a, b) + # Extend core dimensions should fail a = np.arange(8).reshape((4, 2)) b = np.array(7) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "broadcast should fail" + assert_raises(ValueError, umt.inner1d, a, b) + # Broadcast should fail a = np.arange(2).reshape((2, 1, 1)) b = np.arange(3).reshape((3, 1, 1)) - try: - ret = umt.inner1d(a, b) - assert_equal(ret, None, err_msg=msg) - except ValueError: None + assert_raises(ValueError, umt.inner1d, a, b) def test_type_cast(self): msg = "type cast" @@ -538,8 +539,8 @@ class TestUfunc(TestCase): a2 = d2.transpose(p2)[s2] ref = ref and a1.base != None
ref = ref and a2.base != None - if broadcastable(a1.shape[-1], a2.shape[-2]) and \ - broadcastable(a1.shape[0], a2.shape[0]): + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): assert_array_almost_equal( umt.matrix_multiply(a1, a2), np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * @@ -549,6 +550,16 @@ class TestUfunc(TestCase): assert_equal(ref, True, err_msg="reference check") + def test_euclidean_pdist(self): + a = np.arange(12, dtype=np.float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + def test_object_logical(self): a = np.array([3, None, True, False, "test", ""], dtype=object) assert_equal(np.logical_or(a, None), @@ -647,7 +658,6 @@ class TestUfunc(TestCase): a = np.array(1).view(MyArray) assert_(type(np.any(a)) is MyArray) - def test_casting_out_param(self): # Test that it's possible to do casts on output a = np.ones((200, 100), np.int64) @@ -834,45 +844,20 @@ class TestUfunc(TestCase): def test_safe_casting(self): # In old versions of numpy, in-place operations used the 'unsafe' - # casting rules. In some future version, 'same_kind' will become the - # default. + # casting rules. In versions >= 1.10, 'same_kind' is the + # default and an exception is raised instead of a warning. + # when 'same_kind' is not satisfied. a = np.array([1, 2, 3], dtype=int) # Non-in-place addition is fine assert_array_equal(assert_no_warnings(np.add, a, 1.1), [2.1, 3.1, 4.1]) - assert_warns(DeprecationWarning, np.add, a, 1.1, out=a) - assert_array_equal(a, [2, 3, 4]) + assert_raises(TypeError, np.add, a, 1.1, out=a) def add_inplace(a, b): a += b - assert_warns(DeprecationWarning, add_inplace, a, 1.1) - assert_array_equal(a, [3, 4, 5]) - # Make sure that explicitly overriding the warning is allowed: + assert_raises(TypeError, add_inplace, a, 1.1) + # Make sure that explicitly overriding the exception is allowed: assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") - assert_array_equal(a, [4, 5, 6]) - - # There's no way to propagate exceptions from the place where we issue - # this deprecation warning, so we must throw the exception away - # entirely rather than cause it to be raised at some other point, or - # trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some - # other location entirely. 
- import warnings - import sys - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO - with warnings.catch_warnings(): - warnings.simplefilter("error") - old_stderr = sys.stderr - try: - sys.stderr = StringIO() - # No error, but dumps to stderr - a += 1.1 - # No error on the next bit of code executed either - 1 + 1 - assert_("Implicitly casting" in sys.stderr.getvalue()) - finally: - sys.stderr = old_stderr + assert_array_equal(a, [2, 3, 4]) def test_ufunc_custom_out(self): # Test ufunc with built in input types and custom output type @@ -1087,5 +1072,64 @@ class TestUfunc(TestCase): self.assertRaises(TypeError, np.add.at, values, [0, 1], 1) assert_array_equal(values, np.array(['a', 1], dtype=np.object)) + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5,2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, None, False, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyword + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 29ed66aeb..c71b7b658 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -117,6 +117,19 @@ class TestDivision(TestCase): assert_equal(y, [1.e+110, 0], err_msg=msg) +class TestCbrt(TestCase): + def test_cbrt_scalar(self): + assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) + + def test_cbrt(self): + x = np.array([1., 2., -3., np.inf, -np.inf]) + assert_almost_equal(np.cbrt(x**3), x) + + 
assert_(np.isnan(np.cbrt(np.nan))) + assert_equal(np.cbrt(np.inf), np.inf) + assert_equal(np.cbrt(-np.inf), -np.inf) + + class TestPower(TestCase): def test_power_float(self): x = np.array([1., 2., 3.]) @@ -279,7 +292,7 @@ class TestLogAddExp2(_FilterInvalids): x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='ignore'): + with np.errstate(invalid='raise'): for dt in ['f', 'd', 'g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) @@ -342,7 +355,7 @@ class TestLogAddExp(_FilterInvalids): x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='ignore'): + with np.errstate(invalid='raise'): for dt in ['f', 'd', 'g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) @@ -363,11 +376,10 @@ class TestLog1p(TestCase): assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) def test_special(self): - assert_equal(ncu.log1p(np.nan), np.nan) - assert_equal(ncu.log1p(np.inf), np.inf) - with np.errstate(divide="ignore"): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) assert_equal(ncu.log1p(-1.), -np.inf) - with np.errstate(invalid="ignore"): assert_equal(ncu.log1p(-2.), np.nan) assert_equal(ncu.log1p(-np.inf), np.nan) @@ -570,6 +582,17 @@ class TestMaximum(_FilterInvalids): out = np.array([nan, nan, nan]) assert_equal(np.maximum(arg1, arg2), out) + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. + for i in range(1): + x = np.array(float('nan'), np.object) + y = 1.0 + z = np.array(float('nan'), np.object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + def test_complex_nans(self): nan = np.nan for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : @@ -617,6 +640,17 @@ class TestMinimum(_FilterInvalids): out = np.array([nan, nan, nan]) assert_equal(np.minimum(arg1, arg2), out) + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), np.object) + y = 1.0 + z = np.array(float('nan'), np.object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + def test_complex_nans(self): nan = np.nan for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : @@ -716,21 +750,54 @@ class TestFmin(_FilterInvalids): class TestBool(TestCase): - def test_truth_table(self): + def test_truth_table_logical(self): + # 2, 3 and 4 serve as true values + input1 = [0, 0, 3, 2] + input2 = [0, 4, 0, 2] + + typecodes = (np.typecodes['AllFloat'] + + np.typecodes['AllInteger'] + + '?') # boolean + for dtype in map(np.dtype, typecodes): + arg1 = np.asarray(input1, dtype=dtype) + arg2 = np.asarray(input2, dtype=dtype) + + # OR + out = [False, True, True, True] + for func in (np.logical_or, np.maximum): + assert_equal(func(arg1, arg2).astype(bool), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.minimum): + assert_equal(func(arg1, arg2).astype(bool), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.not_equal): + assert_equal(func(arg1, arg2).astype(bool), out) + + def test_truth_table_bitwise(self): arg1 = [False, False, True, True] arg2 = [False, True, False, True] - # OR + out = [False, True, True, True] - for func in (np.logical_or, np.bitwise_or, np.maximum): - assert_equal(func(arg1, arg2), out) - # AND + assert_equal(np.bitwise_or(arg1, arg2), out) + out = [False, False, False, True] - for func in (np.logical_and, np.bitwise_and, np.minimum): - assert_equal(func(arg1, arg2), out) - # XOR + assert_equal(np.bitwise_and(arg1, arg2), out) + out = [False, True, True, False] - for func in (np.logical_xor, np.bitwise_xor, np.not_equal): - assert_equal(func(arg1, arg2), out) + assert_equal(np.bitwise_xor(arg1, arg2), out) + + +class TestInt(TestCase): + def test_logical_not(self): + x = np.ones(10, dtype=np.int16) + o = np.ones(10 * 2, dtype=np.bool) + tgt = o.copy() + tgt[::2] = False + os = o[::2] + assert_array_equal(np.logical_not(x, out=os), False) + assert_array_equal(o, tgt) class TestFloatingPoint(TestCase): @@ -783,6 +850,13 @@ class TestMinMax(TestCase): inp[i] = -1e10 assert_equal(inp.min(), -1e10, err_msg=msg) + def test_lower_align(self): + # check data that is not aligned to element size + # i.e. doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(d.max(), d[0]) + assert_equal(d.min(), d[0]) + class TestAbsoluteNegative(TestCase): def test_abs_neg_blocked(self): @@ -815,6 +889,17 @@ class TestAbsoluteNegative(TestCase): np.negative(inp, out=out) assert_array_equal(out, -1*inp, err_msg=msg) + def test_lower_align(self): + # check data that is not aligned to element size + # i.e. doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + class TestSpecialMethods(TestCase): def test_wrap(self): diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 8484685c0..d7fe702a6 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -16,7 +16,7 @@ from distutils.version import LooseVersion from numpy.distutils import log from numpy.distutils.exec_command import exec_command from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ - quote_args + quote_args,
get_num_build_jobs from numpy.distutils.compat import get_exception @@ -165,9 +165,10 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None, return [] # FIXME:RELATIVE_IMPORT if sys.version_info[0] < 3: - from .fcompiler import FCompiler + from .fcompiler import FCompiler, is_f_file, has_f90_header else: - from numpy.distutils.fcompiler import FCompiler + from numpy.distutils.fcompiler import (FCompiler, is_f_file, + has_f90_header) if isinstance(self, FCompiler): display = [] for fc in ['f77', 'f90', 'fix']: @@ -189,20 +190,45 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None, display += "\nextra options: '%s'" % (' '.join(extra_postargs)) log.info(display) - # build any sources in same order as they were originally specified - # especially important for fortran .f90 files using modules + def single_compile(args): + obj, (src, ext) = args + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + if isinstance(self, FCompiler): objects_to_build = list(build.keys()) + f77_objects, other_objects = [], [] for obj in objects: if obj in objects_to_build: src, ext = build[obj] if self.compiler_type=='absoft': obj = cyg2win32(obj) src = cyg2win32(src) - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + if is_f_file(src) and not has_f90_header(src): + f77_objects.append((obj, (src, ext))) + else: + other_objects.append((obj, (src, ext))) + + # f77 objects can be built in parallel + build_items = f77_objects + # build f90 modules serially; module files are generated during + # compilation and may be used by files later in the list, so the + # ordering is important + for o in other_objects: + single_compile(o) + else: + build_items = build.items() + + jobs = get_num_build_jobs() + if len(build) > 1 and jobs > 1: + # build in parallel + import multiprocessing.pool + pool = multiprocessing.pool.ThreadPool(jobs) + pool.map(single_compile, build_items) + pool.close() else: - for obj, (src, ext) in build.items(): - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + # build serially + for o in build_items: + single_compile(o) # Return *all* object filenames, not just the ones we just built.
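The control flow above is easier to read outside the diff. A stripped-down sketch of the same dispatch, with `compile_one` as a hypothetical stand-in for the patch's `single_compile` closure:

```python
import multiprocessing.pool

def compile_all(build_items, compile_one, jobs):
    """Compile build_items with up to `jobs` concurrent compiler calls."""
    if len(build_items) > 1 and jobs > 1:
        # Each task mostly waits on a spawned compiler subprocess, so a
        # thread pool is sufficient; no process pool is needed.
        pool = multiprocessing.pool.ThreadPool(jobs)
        pool.map(compile_one, build_items)
        pool.close()
    else:
        # Serial fallback for single-file or single-job builds.
        for item in build_items:
            compile_one(item)

compile_all(["a.c", "b.c", "c.c"],
            lambda src: print("compiling", src), jobs=2)
```

Note that the Fortran 90 path above deliberately bypasses the pool: module files are produced during compilation and consumed by later sources, so only the fixed-form f77 objects are safe to build concurrently.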
return objects diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py index 1b9b1dd57..af53c5104 100644 --- a/numpy/distutils/command/autodist.py +++ b/numpy/distutils/command/autodist.py @@ -28,6 +28,23 @@ static %(inline)s int static_func (void) return '' +def check_restrict(cmd): + """Return the restrict identifier (may be empty).""" + cmd._check_compiler() + body = """ +static int static_func (char * %(restrict)s a) +{ + return 0; +} +""" + + for kw in ['restrict', '__restrict__', '__restrict']: + st = cmd.try_compile(body % {'restrict': kw}, None, None) + if st: + return kw + + return '' + def check_compiler_gcc4(cmd): """Return True if the C compiler is GCC 4.x.""" cmd._check_compiler() @@ -41,3 +58,37 @@ main() } """ return cmd.try_compile(body, None, None) + + +def check_gcc_function_attribute(cmd, attribute, name): + """Return True if the given function attribute is supported.""" + cmd._check_compiler() + body = """ +#pragma GCC diagnostic error "-Wattributes" +#pragma clang diagnostic error "-Wattributes" + +int %s %s(void*); + +int +main() +{ +} +""" % (attribute, name) + return cmd.try_compile(body, None, None) != 0 + +def check_gcc_variable_attribute(cmd, attribute): + """Return True if the given variable attribute is supported.""" + cmd._check_compiler() + body = """ +#pragma GCC diagnostic error "-Wattributes" +#pragma clang diagnostic error "-Wattributes" + +int %s foo; + +int +main() +{ + return 0; +} +""" % (attribute, ) + return cmd.try_compile(body, None, None) != 0 diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py index b6912be15..f7249ae81 100644 --- a/numpy/distutils/command/build.py +++ b/numpy/distutils/command/build.py @@ -16,6 +16,8 @@ class build(old_build): user_options = old_build.user_options + [ ('fcompiler=', None, "specify the Fortran compiler type"), + ('jobs=', 'j', + "number of parallel jobs"), ] help_options = old_build.help_options + [ @@ -26,8 +28,14 @@ class build(old_build): def initialize_options(self): old_build.initialize_options(self) self.fcompiler = None + self.jobs = None def finalize_options(self): + if self.jobs: + try: + self.jobs = int(self.jobs) + except ValueError: + raise ValueError("--jobs/-j argument must be an integer") build_scripts = self.build_scripts old_build.finalize_options(self) plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 84ca87250..6e65a3bfb 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -30,6 +30,8 @@ class build_clib(old_build_clib): ('fcompiler=', None, "specify the Fortran compiler type"), ('inplace', 'i', 'Build in-place'), + ('jobs=', 'j', + "number of parallel jobs"), ] boolean_options = old_build_clib.boolean_options + ['inplace'] @@ -38,7 +40,16 @@ class build_clib(old_build_clib): old_build_clib.initialize_options(self) self.fcompiler = None self.inplace = 0 - return + self.jobs = None + + def finalize_options(self): + if self.jobs: + try: + self.jobs = int(self.jobs) + except ValueError: + raise ValueError("--jobs/-j argument must be an integer") + old_build_clib.finalize_options(self) + self.set_undefined_options('build', ('jobs', 'jobs')) def have_f_sources(self): for (lib_name, build_info) in self.libraries: diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index b48e4227a..59c453607 100644 --- a/numpy/distutils/command/build_ext.py +++ 
b/numpy/distutils/command/build_ext.py @@ -34,6 +34,8 @@ class build_ext (old_build_ext): user_options = old_build_ext.user_options + [ ('fcompiler=', None, "specify the Fortran compiler type"), + ('jobs=', 'j', + "number of parallel jobs"), ] help_options = old_build_ext.help_options + [ @@ -44,12 +46,19 @@ class build_ext (old_build_ext): def initialize_options(self): old_build_ext.initialize_options(self) self.fcompiler = None + self.jobs = None def finalize_options(self): + if self.jobs: + try: + self.jobs = int(self.jobs) + except ValueError: + raise ValueError("--jobs/-j argument must be an integer") incl_dirs = self.include_dirs old_build_ext.finalize_options(self) if incl_dirs is not None: self.include_dirs.extend(self.distribution.include_dirs or []) + self.set_undefined_options('build', ('jobs', 'jobs')) def run(self): if not self.extensions: @@ -407,11 +416,6 @@ class build_ext (old_build_ext): if ext.language=='c++' and cxx_compiler is not None: linker = cxx_compiler.link_shared_object - if sys.version[:3]>='2.3': - kws = {'target_lang':ext.language} - else: - kws = {} - linker(objects, ext_filename, libraries=libraries, library_dirs=library_dirs, @@ -419,7 +423,8 @@ class build_ext (old_build_ext): extra_postargs=extra_args, export_symbols=self.get_export_symbols(ext), debug=self.debug, - build_temp=self.build_temp,**kws) + build_temp=self.build_temp, + target_lang=ext.language) def _add_dummy_mingwex_sym(self, c_sources): build_src = self.get_finalized_command("build_src").build_src diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py index 0086e3632..4f104b222 100644 --- a/numpy/distutils/command/config.py +++ b/numpy/distutils/command/config.py @@ -16,7 +16,11 @@ from distutils.ccompiler import CompileError, LinkError import distutils from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4 +from numpy.distutils.command.autodist import (check_gcc_function_attribute, + check_gcc_variable_attribute, + check_inline, + check_restrict, + check_compiler_gcc4) from numpy.distutils.compat import get_exception LANG_EXT['f77'] = '.f' @@ -59,17 +63,28 @@ class config(old_config): e = get_exception() msg = """\ Could not initialize compiler instance: do you have Visual Studio -installed ? If you are trying to build with mingw, please use python setup.py -build -c mingw32 instead ). If you have Visual Studio installed, check it is -correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for -2.5, etc...). Original exception was: %s, and the Compiler -class was %s +installed? If you are trying to build with MinGW, please use "python setup.py +build -c mingw32" instead. If you have Visual Studio installed, check it is +correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, +VS 2010 for >= 3.3). + +Original exception was: %s, and the Compiler class was %s ============================================================================""" \ % (e, self.compiler.__class__.__name__) print ("""\ ============================================================================""") raise distutils.errors.DistutilsPlatformError(msg) + # After MSVC is initialized, add an explicit /MANIFEST to linker + # flags. See issues gh-4245 and gh-4101 for details. Also + # relevant are issues 4431 and 16296 on the Python bug tracker. 
+ from distutils import msvc9compiler + if msvc9compiler.get_build_version() >= 10: + for ldflags in [self.compiler.ldflags_shared, + self.compiler.ldflags_shared_debug]: + if '/MANIFEST' not in ldflags: + ldflags.append('/MANIFEST') + if not isinstance(self.fcompiler, FCompiler): self.fcompiler = new_fcompiler(compiler=self.fcompiler, dry_run=self.dry_run, force=1, @@ -159,7 +174,7 @@ class was %s headers=None, include_dirs=None): self._check_compiler() body = """ -int main() +int main(void) { #ifndef %s (void) %s; @@ -174,7 +189,7 @@ int main() headers=None, include_dirs=None): self._check_compiler() body = """ -int main() +int main(void) { #if %s #else @@ -194,7 +209,7 @@ int main() # First check the type can be compiled body = r""" -int main() { +int main(void) { if ((%(name)s *) 0) return 0; if (sizeof (%(name)s)) @@ -222,7 +237,7 @@ int main() { # First check the type can be compiled body = r""" typedef %(type)s npy_check_sizeof_type; -int main () +int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; test_array [0] = 0 @@ -238,7 +253,7 @@ int main () if expected: body = r""" typedef %(type)s npy_check_sizeof_type; -int main () +int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; test_array [0] = 0 @@ -259,7 +274,7 @@ int main () # this fails to *compile* if size > sizeof(type) body = r""" typedef %(type)s npy_check_sizeof_type; -int main () +int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; test_array [0] = 0 @@ -398,10 +413,21 @@ int main () otherwise.""" return check_inline(self) + def check_restrict(self): + """Return the restrict keyword recognized by the compiler, empty string + otherwise.""" + return check_restrict(self) + def check_compiler_gcc4(self): """Return True if the C compiler is gcc >= 4.""" return check_compiler_gcc4(self) + def check_gcc_function_attribute(self, attribute, name): + return check_gcc_function_attribute(self, attribute, name) + + def check_gcc_variable_attribute(self, attribute): + return check_gcc_variable_attribute(self, attribute) + def get_output(self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c", use_tee=None): diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index b786c0a46..368506470 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -220,6 +220,9 @@ class GnuFCompiler(FCompiler): def get_flags_arch(self): return [] + def runtime_library_dir_option(self, dir): + return '-Wl,-rpath="%s"' % dir + class Gnu95FCompiler(GnuFCompiler): compiler_type = 'gnu95' compiler_aliases = ('gfortran',) @@ -252,12 +255,13 @@ class Gnu95FCompiler(GnuFCompiler): possible_executables = ['gfortran', 'f95'] executables = { 'version_cmd' : ["<F90>", "--version"], - 'compiler_f77' : [None, "-Wall", "-ffixed-form", + 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", + "-fno-second-underscore"] + _EXTRAFLAGS, + 'compiler_f90' : [None, "-Wall", "-g", "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_fix' : [None, "-Wall", "-ffixed-form", + 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", "-fno-second-underscore"] + _EXTRAFLAGS, - 'linker_so' : ["<F90>", "-Wall"], + 'linker_so' : ["<F90>", "-Wall", "-g"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-Wall"] diff --git 
a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index c146178f0..cab0a4cba 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -13,6 +13,10 @@ import shutil import distutils from distutils.errors import DistutilsError +try: + from threading import local as tlocal +except ImportError: + from dummy_threading import local as tlocal try: set @@ -31,7 +35,8 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'get_script_files', 'get_lib_source_files', 'get_data_files', 'dot_join', 'get_frame', 'minrelpath', 'njoin', 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info'] + 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info', + 'get_num_build_jobs'] class InstallableLib(object): """ @@ -60,6 +65,36 @@ class InstallableLib(object): self.build_info = build_info self.target_dir = target_dir + +def get_num_build_jobs(): + """ + Get the number of parallel build jobs set by the --jobs command line + argument of setup.py. If the command did not receive a setting, the + environment variable NPY_NUM_BUILD_JOBS is checked; if that is also + unset, 1 is returned. + + Returns + ------- + out : int + number of parallel jobs that can be run + + """ + from numpy.distutils.core import get_distribution + envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1)) + dist = get_distribution() + # may be None during configuration + if dist is None: + return envjobs + + # any of these three commands may carry the jobs setting; take the largest + cmdattr = (getattr(dist.get_command_obj('build'), 'jobs'), + getattr(dist.get_command_obj('build_ext'), 'jobs'), + getattr(dist.get_command_obj('build_clib'), 'jobs')) + if all(x is None for x in cmdattr): + return envjobs + else: + return max(x for x in cmdattr if x is not None) + def quote_args(args): # don't used _nt_quote_args as it does not check if # args items already have quotes or not. @@ -249,9 +284,9 @@ def gpaths(paths, local_path='', include_non_existing=True): return _fix_paths(paths, local_path, include_non_existing) -_temporary_directory = None def clean_up_temporary_directory(): - global _temporary_directory + tdata = tlocal() + _temporary_directory = getattr(tdata, 'tempdir', None) if not _temporary_directory: return try: @@ -261,13 +296,13 @@ def clean_up_temporary_directory(): _temporary_directory = None def make_temp_file(suffix='', prefix='', text=True): - global _temporary_directory - if not _temporary_directory: - _temporary_directory = tempfile.mkdtemp() + tdata = tlocal() + if not hasattr(tdata, 'tempdir'): + tdata.tempdir = tempfile.mkdtemp() atexit.register(clean_up_temporary_directory) fid, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, - dir=_temporary_directory, + dir=tdata.tempdir, text=text) fo = os.fdopen(fid, 'w') return fo, name diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 48c92c548..ddb1513c4 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -434,7 +434,7 @@ class UmfpackNotFoundError(NotFoundError): the UMFPACK environment variable.""" -class system_info: +class system_info(object): """ get_info() is the only public method. Don't use others.
""" @@ -962,7 +962,8 @@ class mkl_info(system_info): if info is None: return dict_append(info, - define_macros=[('SCIPY_MKL_H', None)], + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], include_dirs=incl_dirs) if sys.platform == 'win32': pass # win32 has no pthread library @@ -1120,6 +1121,7 @@ class atlas_blas_info(atlas_info): h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(atlas, **atlas_extra_info) @@ -1414,7 +1416,8 @@ class lapack_opt_info(system_info): if args: self.set_info(extra_compile_args=args, extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3)]) + define_macros=[('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None)]) return #atlas_info = {} ## uncomment for testing @@ -1515,7 +1518,8 @@ class blas_opt_info(system_info): if args: self.set_info(extra_compile_args=args, extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3)]) + define_macros=[('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None)]) return need_blas = 0 @@ -1556,9 +1560,33 @@ class blas_info(system_info): info = self.check_libs(lib_dirs, blas_libs, []) if info is None: return - info['language'] = 'f77' # XXX: is it generally true? + if self.has_cblas(): + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + else: + info['language'] = 'f77' # XXX: is it generally true? self.set_info(**info) + def has_cblas(self): + # primitive cblas check by looking for the header + res = False + c = distutils.ccompiler.new_compiler() + tmpdir = tempfile.mkdtemp() + s = """#include <cblas.h>""" + src = os.path.join(tmpdir, 'source.c') + try: + with open(src, 'wt') as f: + f.write(s) + try: + c.compile([src], output_dir=tmpdir, + include_dirs=self.get_include_dirs()) + res = True + except distutils.ccompiler.CompileError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + class openblas_info(blas_info): section = 'openblas' @@ -1580,9 +1608,10 @@ class openblas_info(blas_info): return if not self.check_embedded_lapack(info): - return None + return - info['language'] = 'f77' # XXX: is it generally true? 
+ info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] self.set_info(**info) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 893081126..0fde37bcf 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1274,7 +1274,7 @@ def markinnerspaces(line): cb='' for c in line: if cb=='\\' and c in ['\\', '\'', '"']: - l=l+c; + l=l+c cb=c continue if f==0 and c in ['\'', '"']: cc=c; cc1={'\'':'"','"':'\''}[c] @@ -2198,8 +2198,10 @@ def analyzevars(block): if 'intent' not in vars[n]: vars[n]['intent']=[] for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: - if not c in vars[n]['intent']: - vars[n]['intent'].append(c) + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) intent=None if note: note=note.replace('\\n\\n', '\n\n') @@ -2220,7 +2222,7 @@ def analyzevars(block): if 'check' not in vars[n]: vars[n]['check']=[] for c in [x.strip() for x in markoutercomma(check).split('@,@')]: - if not c in vars[n]['check']: + if c not in vars[n]['check']: vars[n]['check'].append(c) check=None if dim and 'dimension' not in vars[n]: diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py index 2f1fd6a01..a27a001a9 100644 --- a/numpy/f2py/setup.py +++ b/numpy/f2py/setup.py @@ -32,12 +32,10 @@ from __version__ import version def configuration(parent_package='',top_path=None): config = Configuration('f2py', parent_package, top_path) - config.add_data_dir('docs') config.add_data_dir('tests') config.add_data_files('src/fortranobject.c', 'src/fortranobject.h', - 'f2py.1' ) config.make_svn_version_py() @@ -90,26 +88,24 @@ main() if __name__ == "__main__": config = configuration(top_path='') - version = config.get_version() print('F2PY Version', version) config = config.todict() - if sys.version[:3]>='2.3': - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" - config['classifiers'] = [ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: NumPy License', - 'Natural Language :: English', - 'Operating System :: OS Independent', - 'Programming Language :: C', - 'Programming Language :: Fortran', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Code Generators', - ] + config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ + "/F2PY-2-latest.tar.gz" + config['classifiers'] = [ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: NumPy License', + 'Natural Language :: English', + 'Operating System :: OS Independent', + 'Programming Language :: C', + 'Programming Language :: Fortran', + 'Programming Language :: Python', + 'Topic :: Scientific/Engineering', + 'Topic :: Software Development :: Code Generators', + ] setup(version=version, description = "F2PY - Fortran to Python Interface Generaton", author = "Pearu Peterson", diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 9c96c1f46..001a4c7de 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -5,6 +5,9 @@ extern "C" { #endif +#include <stdlib.h> +#include <string.h> + /* This file implements: FortranObject, array_from_pyobj, copy_ND_array @@ -100,84 +103,141 @@ static PyMethodDef fortran_methods[] = { #endif -static PyObject * 
-fortran_doc (FortranDataDef def) { - char *p; - /* - p is used as a buffer to hold generated documentation strings. - A common operation in generating the documentation strings, is - appending a string to the buffer p. Earlier, the following - idiom was: +/* Returns number of bytes consumed from buf, or -1 on error. */ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i, n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; - sprintf(p, "%s<string to be appended>", p); + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } - but this does not work when _FORTIFY_SOURCE=2 is enabled: instead - of appending the string, the string is inserted. + if (size <= 0) { + return -1; + } - As a fix, the following idiom should be used for appending - strings to a buffer p: + p[0] = ')'; + p++; + size--; - sprintf(p + strlen(p), "<string to be appended>"); - */ + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if (size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; PyObject *s = NULL; - int i; - unsigned size=100; - if (def.doc!=NULL) + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { size += strlen(def.doc); - p = (char*)malloc (size); - p[0] = '\0'; /* make sure that the buffer has zero length */ - if (def.rank==-1) { - if (def.doc==NULL) { - if (sprintf(p,"%s - ",def.name)==0) goto fail; - if (sprintf(p+strlen(p),"no docs available")==0) + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { goto fail; - } else { - if (sprintf(p+strlen(p),"%s",def.doc)==0) + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { goto fail; + } + p += n; + size -= n; } - } else { + } + else { PyArray_Descr *d = PyArray_DescrFromType(def.type); - if (sprintf(p+strlen(p),"'%c'-",d->type)==0) { - Py_DECREF(d); - goto fail; - } + n = PyOS_snprintf(p, size, "'%c'-", d->type); Py_DECREF(d); - if (def.data==NULL) { - if (sprintf(p+strlen(p),"array(%" NPY_INTP_FMT,def.dims.d[0])==0) - goto fail; - for(i=1;i<def.rank;++i) - if (sprintf(p+strlen(p),",%" NPY_INTP_FMT,def.dims.d[i])==0) - goto fail; - if (sprintf(p+strlen(p),"), not allocated")==0) - goto fail; - } else { - if (def.rank>0) { - if (sprintf(p+strlen(p),"array(%"NPY_INTP_FMT,def.dims.d[0])==0) - goto fail; - for(i=1;i<def.rank;i++) - if (sprintf(p+strlen(p),",%" NPY_INTP_FMT,def.dims.d[i])==0) - goto fail; - if (sprintf(p+strlen(p),")")==0) goto fail; - } else { - if (sprintf(p+strlen(p),"scalar")==0) goto fail; + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; } } - if (sprintf(p+strlen(p),"\n")==0) goto fail; - if
(strlen(p)>size) { - fprintf(stderr,"fortranobject.c:fortran_doc:len(p)=%zd>%d(size):"\ - " too long doc string required, increase size\n",\ - strlen(p),size); + if (size <= 1) { goto fail; } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ #if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromString(p); + s = PyUnicode_FromStringAndSize(buf, p - buf); #else - s = PyString_FromString(p); + s = PyString_FromStringAndSize(buf, p - buf); #endif - fail: - free(p); + + PyMem_Free(buf); return s; + + fail: + fprintf(stderr, "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; } static FortranDataDef *save_def; /* save pointer of an allocatable array */ @@ -619,11 +679,11 @@ PyArrayObject* array_from_pyobj(const int type_num, /* intent(cache), optional, intent(hide) */ if (count_nonpos(rank,dims)) { int i; - sprintf(mess,"failed to create intent(cache|hide)|optional array" - "-- must have defined dimensions but got ("); + strcpy(mess, "failed to create intent(cache|hide)|optional array" + "-- must have defined dimensions but got ("); for(i=0;i<rank;++i) sprintf(mess+strlen(mess),"%" NPY_INTP_FMT ",",dims[i]); - sprintf(mess+strlen(mess),")"); + strcat(mess, ")"); PyErr_SetString(PyExc_ValueError,mess); return NULL; } @@ -656,9 +716,9 @@ PyArrayObject* array_from_pyobj(const int type_num, Py_INCREF(obj); return (PyArrayObject *)obj; } - sprintf(mess,"failed to initialize intent(cache) array"); + strcpy(mess, "failed to initialize intent(cache) array"); if (!PyArray_ISONESEGMENT(obj)) - sprintf(mess+strlen(mess)," -- input must be in one segment"); + strcat(mess, " -- input must be in one segment"); if (PyArray_ITEMSIZE(arr)<elsize) sprintf(mess+strlen(mess)," -- expected at least elsize=%d but got %d", elsize,PyArray_ITEMSIZE(arr) @@ -694,11 +754,11 @@ PyArrayObject* array_from_pyobj(const int type_num, } if (intent & F2PY_INTENT_INOUT) { - sprintf(mess,"failed to initialize intent(inout) array"); + strcpy(mess, "failed to initialize intent(inout) array"); if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) - sprintf(mess+strlen(mess)," -- input not contiguous"); + strcat(mess, " -- input not contiguous"); if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) - sprintf(mess+strlen(mess)," -- input not fortran contiguous"); + strcat(mess, " -- input not fortran contiguous"); if (PyArray_ITEMSIZE(arr)!=elsize) sprintf(mess+strlen(mess)," -- expected elsize=%d but got %d", elsize, diff --git a/numpy/f2py/tests/src/regression/inout.f90 b/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 000000000..80cdad90c --- /dev/null +++ b/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. 
+ subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 3a148e72c..c51fa3936 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -4,11 +4,13 @@ import unittest import os import sys import copy +import platform import nose from numpy.testing import * -from numpy import array, alltrue, ndarray, asarray, can_cast, zeros, dtype +from numpy import (array, alltrue, ndarray, asarray, can_cast, zeros, dtype, + intp, clongdouble) from numpy.core.multiarray import typeinfo import util @@ -81,37 +83,46 @@ class Intent(object): intent = Intent() -class Type(object): - _type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', - 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', - 'FLOAT', 'DOUBLE', 'LONGDOUBLE', 'CFLOAT', 'CDOUBLE', - 'CLONGDOUBLE'] - _type_cache = {} - - _cast_dict = {'BOOL':['BOOL']} - _cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] - _cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] - _cast_dict['BYTE'] = ['BYTE'] - _cast_dict['UBYTE'] = ['UBYTE'] - _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] - _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] - _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] - _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] - - _cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] - _cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] - - _cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] - _cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] - - _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] - _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] - - _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] +_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', + 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', + 'FLOAT', 'DOUBLE', 'CFLOAT'] + +_cast_dict = {'BOOL':['BOOL']} +_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] +_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] +_cast_dict['BYTE'] = ['BYTE'] +_cast_dict['UBYTE'] = ['UBYTE'] +_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] +_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] +_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] +_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] + +_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] +_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] + +_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] +_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] + +_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] +_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] + +_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types. This means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or false. +# When numpy gains an aligned allocator the tests could be enabled again. +if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and + sys.platform != 'win32'): + _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) + _cast_dict['LONGDOUBLE'] =
_cast_dict['LONG'] + \ + ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] + _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ + ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] +class Type(object): + _type_cache = {} def __new__(cls, name): if isinstance(name, dtype): @@ -138,15 +149,15 @@ class Type(object): self.dtypechar = typeinfo[self.NAME][0] def cast_types(self): - return [self.__class__(_m) for _m in self._cast_dict[self.NAME]] + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] def all_types(self): - return [self.__class__(_m) for _m in self._type_names] + return [self.__class__(_m) for _m in _type_names] def smaller_types(self): bits = typeinfo[self.NAME][3] types = [] - for name in self._type_names: + for name in _type_names: if typeinfo[name][3]<bits: types.append(Type(name)) return types @@ -154,7 +165,7 @@ class Type(object): def equal_types(self): bits = typeinfo[self.NAME][3] types = [] - for name in self._type_names: + for name in _type_names: if name==self.NAME: continue if typeinfo[name][3]==bits: types.append(Type(name)) @@ -163,7 +174,7 @@ class Type(object): def larger_types(self): bits = typeinfo[self.NAME][3] types = [] - for name in self._type_names: + for name in _type_names: if typeinfo[name][3]>bits: types.append(Type(name)) return types @@ -532,7 +543,7 @@ class _test_shared_memory: assert_(obj.dtype.type is self.type.dtype) # obj type is changed inplace! -for t in Type._type_names: +for t in _type_names: exec('''\ class test_%s_gen(unittest.TestCase, _test_shared_memory diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py new file mode 100644 index 000000000..9bd3f3fe3 --- /dev/null +++ b/numpy/f2py/tests/test_regression.py @@ -0,0 +1,32 @@ +from __future__ import division, absolute_import, print_function + +import os +import math + +import numpy as np +from numpy.testing import dec, assert_raises, assert_equal + +import util + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestIntentInOut(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [_path('src', 'regression', 'inout.f90')] + + @dec.slow + def test_inout(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + assert_raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo(x) + assert_equal(x, [3, 1, 2]) + + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py index 706fcdd2f..4efb2a9a0 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/fftpack.py @@ -43,7 +43,7 @@ _fft_cache = {} _real_fft_cache = {} def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, - work_function=fftpack.cfftf, fft_cache = _fft_cache ): + work_function=fftpack.cfftf, fft_cache=_fft_cache): a = asarray(a) if n is None: diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py index 79f681e55..cd99a82d7 100644 --- a/numpy/fft/setup.py +++ b/numpy/fft/setup.py @@ -12,7 +12,6 @@ def configuration(parent_package='',top_path=None): sources=['fftpack_litemodule.c', 'fftpack.c'] ) - return config if __name__ == '__main__': diff --git a/numpy/fft/tests/test_fftpack.py b/numpy/fft/tests/test_fftpack.py index ac892c83b..45b5ac784 100644 --- a/numpy/fft/tests/test_fftpack.py +++ 
b/numpy/fft/tests/test_fftpack.py @@ -48,11 +48,11 @@ class TestFFTThreadSafe(TestCase): for i in range(self.threads)] [x.start() for x in t] + [x.join() for x in t] # Make sure all threads returned the correct value for i in range(self.threads): assert_array_equal(q.get(timeout=5), expected, 'Function returned wrong value in multithreaded context') - [x.join() for x in t] def test_fft(self): a = np.ones(self.input_shape) * 1+0j diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py index 7eaa99fdb..1a51f8e3a 100644 --- a/numpy/fft/tests/test_helper.py +++ b/numpy/fft/tests/test_helper.py @@ -30,8 +30,8 @@ class TestFFTShift(TestCase): assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) def test_axes_keyword(self): - freqs = [[ 0, 1, 2], [ 3, 4, -4], [-3, -2, -1]] - shifted = [[-1, -3, -2], [ 2, 0, 1], [-4, 3, 4]] + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) assert_array_almost_equal(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,))) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 1b1180893..9108b2e4c 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -687,7 +687,7 @@ class StringConverter(object): def upgrade(self, value): """ - Rind the best converter for a given string, and return the result. + Find the best converter for a given string, and return the result. The supplied string `value` is converted by testing different converters in order. First the `func` method of the diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index 2d98c35d2..d3b6119f4 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -204,8 +204,9 @@ def unique(ar, return_index=False, return_inverse=False, return_counts=False): ret += (perm[flag],) if return_inverse: iflag = np.cumsum(flag) - 1 - iperm = perm.argsort() - ret += (np.take(iflag, iperm),) + inv_idx = np.empty(ar.shape, dtype=np.intp) + inv_idx[perm] = iflag + ret += (inv_idx,) if return_counts: idx = np.concatenate(np.nonzero(flag) + ([ar.size],)) ret += (np.diff(idx),) @@ -240,6 +241,11 @@ def intersect1d(ar1, ar2, assume_unique=False): >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) array([1, 3]) + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) """ if not assume_unique: # Might be faster than unique( intersect1d( ar1, ar2 ) )? @@ -332,6 +338,10 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): `in1d` can be considered as an element-wise function version of the python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. .. 
versionadded:: 1.4.0 @@ -416,6 +426,11 @@ def union1d(ar1, ar2): >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) """ return unique(np.concatenate((ar1, ar2))) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 7c8dfbafa..b93f86ca3 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -298,7 +298,8 @@ def _write_array_header(fp, d, version=None): # can take advantage of our premature optimization. current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline topad = 16 - (current_header_len % 16) - header = asbytes(header + ' '*topad + '\n') + header = header + ' '*topad + '\n' + header = asbytes(_filter_header(header)) if len(header) >= (256*256) and version == (1, 0): raise ValueError("header does not fit inside %s bytes required by the" @@ -433,7 +434,7 @@ def _filter_header(s): from io import StringIO else: from StringIO import StringIO - + tokens = [] last_token_was_number = False for token in tokenize.generate_tokens(StringIO(asstr(s)).read): @@ -448,7 +449,7 @@ def _filter_header(s): last_token_was_number = (token_type == tokenize.NUMBER) return tokenize.untokenize(tokens) - + def _read_array_header(fp, version): """ see read_array_header_1_0 diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 0a1d05f77..36ce94bad 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -337,6 +337,11 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 + # avoid rounding issues for comparisons when dealing with inexact types + if np.issubdtype(sample.dtype, np.inexact): + edge_dt = sample.dtype + else: + edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): @@ -345,9 +350,9 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins - edges[i] = linspace(smin[i], smax[i], nbin[i]-1) + edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: - edges[i] = asarray(bins[i], float) + edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): @@ -510,8 +515,7 @@ def average(a, axis=None, weights=None, returned=False): scl = avg.dtype.type(a.size/avg.size) else: a = a + 0.0 - wgt = np.array(weights, dtype=a.dtype, copy=0) - + wgt = np.asarray(weights) # Sanity checks if a.shape != wgt.shape: if axis is None: @@ -528,7 +532,7 @@ def average(a, axis=None, weights=None, returned=False): # setup wgt to broadcast along axis wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) - scl = wgt.sum(axis=axis) + scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype)) if (scl == 0.0).any(): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") @@ -878,28 +882,33 @@ def copy(a, order='K'): # Basic operations -def gradient(f, *varargs): +def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. - + The gradient is computed using second order accurate central differences - in the interior and second order accurate one-sides (forward or backwards) - differences at the boundaries. The returned gradient hence has the same - shape as the input array. 
+ in the interior and either first differences or second order accurate + one-sided (forward or backward) differences at the boundaries. The + returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like - An N-dimensional array containing samples of a scalar function. - `*varargs` : scalars - 0, 1, or N scalars specifying the sample distances in each direction, - that is: `dx`, `dy`, `dz`, ... The default distance is 1. + An N-dimensional array containing samples of a scalar function. + varargs : list of scalar, optional + N scalars specifying the sample distances for each dimension, + i.e. `dx`, `dy`, `dz`, ... Default distance: 1. + edge_order : {1, 2}, optional + Gradient is calculated using N\ :sup:`th` order accurate differences + at the boundaries. Default: 1. + + .. versionadded:: 1.9.1 Returns ------- gradient : ndarray - N arrays of the same shape as `f` giving the derivative of `f` with - respect to each dimension. + N arrays of the same shape as `f` giving the derivative of `f` with + respect to each dimension. Examples -------- @@ -911,15 +920,14 @@ def gradient(f, *varargs): >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), - array([[ 1. , 2.5, 4. ], - [ 1. , 1. , 1. ]])] + [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], + [ 1. , 1. , 1. ]])] - >>> x = np.array([0,1,2,3,4]) - >>> dx = gradient(x) + >>> x = np.array([0, 1, 2, 3, 4]) + >>> dx = np.gradient(x) >>> y = x**2 - >>> gradient(y,dx) - array([0., 2., 4., 6., 8.]) + >>> np.gradient(y, dx, edge_order=2) + array([-0., 2., 4., 6., 8.]) """ f = np.asanyarray(f) N = len(f.shape) # number of dimensions @@ -934,6 +942,13 @@ def gradient(f, *varargs): raise SyntaxError( "invalid number of arguments") + edge_order = kwargs.pop('edge_order', 1) + if kwargs: + raise TypeError('"{}" are not valid keyword arguments.'.format( + '", "'.join(kwargs.keys()))) + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. @@ -973,7 +988,7 @@ def gradient(f, *varargs): "at least two elements are required.") # Numerical differentiation: 1st order edges, 2nd order interior - if y.shape[axis] == 2: + if y.shape[axis] == 2 or edge_order == 1: # Use first order differences for time data out = np.empty_like(y, dtype=otype) @@ -1021,7 +1036,8 @@ def gradient(f, *varargs): out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 # divide by step size - outvals.append(out / dx[axis]) + out /= dx[axis] + outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) @@ -1097,7 +1113,7 @@ def diff(a, n=1, axis=-1): return a[slice1]-a[slice2] -def interp(x, xp, fp, left=None, right=None): +def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. @@ -1110,7 +1126,9 @@ def interp(x, xp, fp, left=None, right=None): The x-coordinates of the interpolated values. xp : 1-D sequence of floats - The x-coordinates of the data points, must be increasing. + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of floats The y-coordinates of the data points, same length as `xp`.
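The new `edge_order` keyword only changes the boundary stencils, which a short example makes concrete; output values assume a NumPy with this change applied (>= 1.9.1):

```python
import numpy as np

y = np.array([0., 1., 4., 9., 16.])   # samples of x**2 at x = 0..4

# Default edge_order=1: first-order one-sided differences at the ends.
print(np.gradient(y))                  # [ 1.  2.  4.  6.  7.]

# edge_order=2 recovers the exact derivative of a quadratic everywhere.
print(np.gradient(y, edge_order=2))    # [ 0.  2.  4.  6.  8.]

# Unrecognized keywords are rejected instead of silently ignored.
try:
    np.gradient(y, edge_ordr=2)        # hypothetical misspelling
except TypeError as exc:
    print(exc)
```

The explicit `kwargs.pop` dance is needed because the function also takes `*varargs`; on Python 2 there is no syntax for a keyword-only argument after a star-args parameter.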
@@ -1121,6 +1139,12 @@ def interp(x, xp, fp, left=None, right=None): right : float, optional Value to return for `x > xp[-1]`, default is `fp[-1]`. + period : None or float, optional + .. versionadded:: 1.10.0 + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + Returns ------- y : {float, ndarray} @@ -1130,6 +1154,8 @@ def interp(x, xp, fp, left=None, right=None): ------ ValueError If `xp` and `fp` have different length + If `xp` or `fp` are not 1-D sequences + If `period == 0` Notes ----- @@ -1139,7 +1165,6 @@ def interp(x, xp, fp, left=None, right=None): np.all(np.diff(xp) > 0) - Examples -------- >>> xp = [1, 2, 3] @@ -1165,13 +1190,51 @@ def interp(x, xp, fp, left=None, right=None): [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() + Interpolation with periodic x-coordinates: + + >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] + >>> xp = [190, -190, 350, -350] + >>> fp = [5, 10, 3, 4] + >>> np.interp(x, xp, fp, period=360) + array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) + """ - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - elif isinstance(x, np.ndarray) and x.ndim == 0: - return compiled_interp([x], xp, fp, left, right).item() + if period is None: + if isinstance(x, (float, int, number)): + return compiled_interp([x], xp, fp, left, right).item() + elif isinstance(x, np.ndarray) and x.ndim == 0: + return compiled_interp([x], xp, fp, left, right).item() + else: + return compiled_interp(x, xp, fp, left, right) else: - return compiled_interp(x, xp, fp, left, right) + if period == 0: + raise ValueError("period must be a non-zero value") + period = abs(period) + left = None + right = None + return_array = True + if isinstance(x, (float, int, number)): + return_array = False + x = [x] + x = np.asarray(x, dtype=np.float64) + xp = np.asarray(xp, dtype=np.float64) + fp = np.asarray(fp, dtype=np.float64) + if xp.ndim != 1 or fp.ndim != 1: + raise ValueError("Data points must be 1-D sequences") + if xp.shape[0] != fp.shape[0]: + raise ValueError("fp and xp are not of the same length") + # normalizing periodic boundaries + x = x % period + xp = xp % period + asort_xp = np.argsort(xp) + xp = xp[asort_xp] + fp = fp[asort_xp] + xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) + fp = np.concatenate((fp[-1:], fp, fp[0:1])) + if return_array: + return compiled_interp(x, xp, fp, left, right) + else: + return compiled_interp(x, xp, fp, left, right).item() def angle(z, deg=0): @@ -1387,6 +1450,8 @@ def extract(condition, arr): This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + Note that `place` does the exact opposite of `extract`. + Parameters ---------- condition : array_like @@ -1402,7 +1467,7 @@ def extract(condition, arr): See Also -------- - take, put, copyto, compress + take, put, copyto, compress, place Examples -------- @@ -2998,7 +3063,7 @@ def percentile(a, q, axis=None, out=None, nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match q exactly. This function is the same as the median if ``q=50``, the same - as the minimum if ``q=0``and the same as the maximum if ``q=100``. + as the minimum if ``q=0`` and the same as the maximum if ``q=100``. 
Examples -------- @@ -3031,7 +3096,7 @@ def percentile(a, q, axis=None, out=None, array([ 3.5]) """ - q = asarray(q, dtype=np.float64) + q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) @@ -3758,7 +3823,9 @@ def insert(arr, obj, values, axis=None): if (index < 0): index += N - values = array(values, copy=False, ndmin=arr.ndim) + # There are some object array corner cases here, but we cannot avoid + # that: + values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 98c6b291b..f83024961 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -727,6 +727,7 @@ def fill_diagonal(a, val, wrap=False): # tall matrices no wrap >>> a = np.zeros((5, 3),int) >>> fill_diagonal(a, 4) + >>> a array([[4, 0, 0], [0, 4, 0], [0, 0, 4], @@ -735,7 +736,8 @@ def fill_diagonal(a, val, wrap=False): # tall matrices wrap >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4) + >>> fill_diagonal(a, 4, wrap=True) + >>> a array([[4, 0, 0], [0, 4, 0], [0, 0, 4], @@ -744,7 +746,8 @@ def fill_diagonal(a, val, wrap=False): # wide matrices >>> a = np.zeros((3, 5),int) - >>> fill_diagonal(a, 4) + >>> fill_diagonal(a, 4, wrap=True) + >>> a array([[4, 0, 0, 0, 0], [0, 4, 0, 0, 0], [0, 0, 4, 0, 0]]) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index f5ac35e54..7260a35b8 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -33,6 +33,10 @@ def _replace_nan(a, val): marking the locations where NaNs were present. If `a` is not of inexact type, do nothing and return `a` together with a mask of None. + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + Parameters ---------- a : array-like @@ -1037,7 +1041,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): avg = _divide_by_count(avg, cnt) # Compute squared deviation from mean. - arr -= avg + np.subtract(arr, avg, out=arr, casting='unsafe') arr = _copyto(arr, 0, mask) if issubclass(arr.dtype.type, np.complexfloating): sqr = np.multiply(arr, arr.conj(), out=arr).real diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index fe855a71a..641203f34 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -288,8 +288,7 @@ def load(file, mmap_mode=None): Parameters ---------- file : file-like object or string - The file to read. Compressed files with the filename extension - ``.gz`` are acceptable. File-like objects must support the + The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Pickled files require that the file-like object support the ``readline()`` method as well. mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional @@ -1519,7 +1518,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, # Process the filling_values ............................... 
# Rename the input for convenience - user_filling_values = filling_values or [] + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] # Define the default filling_values = [None] * nbcols # We have a dictionary : update each entry individually @@ -1574,22 +1575,25 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] - for (i, conv) in user_converters.items(): + for (j, conv) in user_converters.items(): # If the converter is specified by column names, use the index instead - if _is_string_like(i): + if _is_string_like(j): try: - i = names.index(i) + j = names.index(j) + i = j except ValueError: continue elif usecols: try: - i = usecols.index(i) + i = usecols.index(j) except ValueError: # Unused converter specified continue - # Find the value to test: + else: + i = j + # Find the value to test - first_line is not filtered by usecols: if len(first_line): - testing_value = first_values[i] + testing_value = first_values[j] else: testing_value = None converters[i].update(conv, locked=True, diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 6a1adc773..2b867e244 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -12,10 +12,11 @@ import re import warnings import numpy.core.numeric as NX -from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot +from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, + ones) from numpy.lib.twodim_base import diag, vander from numpy.lib.function_base import trim_zeros, sort_complex -from numpy.lib.type_check import iscomplex, real, imag +from numpy.lib.type_check import iscomplex, real, imag, mintypecode from numpy.linalg import eigvals, lstsq, inv class RankWarning(UserWarning): @@ -122,19 +123,24 @@ def poly(seq_of_zeros): """ seq_of_zeros = atleast_1d(seq_of_zeros) sh = seq_of_zeros.shape + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: - pass + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") if len(seq_of_zeros) == 0: return 1.0 - - a = [1] + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) for k in range(len(seq_of_zeros)): - a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full') + a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), + mode='full') if issubclass(a.dtype.type, NX.complexfloating): # if complex roots are all complex conjugates, the roots are real. 
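The motivation for threading a dtype through `poly`'s convolution loop is easiest to see numerically; a short illustration, assuming a numpy that includes this change:

    import numpy as np
    from decimal import Decimal

    # gh-5096: with an integer accumulator the coefficients of
    # (x-1)(x-2)...(x-20) overflow int64; casting via mintypecode avoids it.
    v = np.arange(1, 21)
    print(np.allclose(np.poly(v), np.poly(np.diag(v))))   # expect True

    # Object arrays slip through untouched, preserving arbitrary precision:
    print(np.poly([Decimal(1), Decimal(2)]))   # coefficients of x**2 - 3x + 2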
@@ -671,7 +677,7 @@ def polyval(p, x): x = NX.asarray(x) y = NX.zeros_like(x) for i in range(len(p)): - y = x * y + p[i] + y = y * x + p[i] return y def polyadd(a1, a2): diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py index 68d99c33a..62d1dfbb8 100644 --- a/numpy/lib/setup.py +++ b/numpy/lib/setup.py @@ -13,7 +13,6 @@ def configuration(parent_package='',top_path=None): sources=[join('src', '_compiled_base.c')] ) - config.add_data_dir('benchmarks') config.add_data_dir('tests') return config diff --git a/numpy/lib/src/_compiled_base.c b/numpy/lib/src/_compiled_base.c index a461613e3..daf96a823 100644 --- a/numpy/lib/src/_compiled_base.c +++ b/numpy/lib/src/_compiled_base.c @@ -8,58 +8,6 @@ #include "string.h" -static npy_intp -incr_slot_(double x, double *bins, npy_intp lbins) -{ - npy_intp i; - - for ( i = 0; i < lbins; i ++ ) { - if ( x < bins [i] ) { - return i; - } - } - return lbins; -} - -static npy_intp -decr_slot_(double x, double * bins, npy_intp lbins) -{ - npy_intp i; - - for ( i = lbins - 1; i >= 0; i -- ) { - if (x < bins [i]) { - return i + 1; - } - } - return 0; -} - -static npy_intp -incr_slot_right_(double x, double *bins, npy_intp lbins) -{ - npy_intp i; - - for ( i = 0; i < lbins; i ++ ) { - if ( x <= bins [i] ) { - return i; - } - } - return lbins; -} - -static npy_intp -decr_slot_right_(double x, double * bins, npy_intp lbins) -{ - npy_intp i; - - for ( i = lbins - 1; i >= 0; i -- ) { - if (x <= bins [i]) { - return i + 1; - } - } - return 0; -} - /* * Returns -1 if the array is monotonic decreasing, * +1 if the array is monotonic increasing, @@ -125,6 +73,7 @@ minmax(const npy_intp *data, npy_intp data_len, npy_intp *mn, npy_intp *mx) *mn = min; *mx = max; } + /* * arr_bincount is registered as bincount. * @@ -244,143 +193,111 @@ fail: return NULL; } - /* - * digitize (x, bins, right=False) returns an array of python integers the same - * length of x. The values i returned are such that bins [i - 1] <= x < - * bins [i] if bins is monotonically increasing, or bins [i - 1] > x >= - * bins [i] if bins is monotonically decreasing. Beyond the bounds of - * bins, returns either i = 0 or i = len (bins) as appropriate. - * if right == True the comparison is bins [i - 1] < x <= bins[i] - * or bins [i - 1] >= x > bins[i] + * digitize(x, bins, right=False) returns an array of integers the same length + * as x. The values i returned are such that bins[i - 1] <= x < bins[i] if + * bins is monotonically increasing, or bins[i - 1] > x >= bins[i] if bins + * is monotonically decreasing. Beyond the bounds of bins, returns either + * i = 0 or i = len(bins) as appropriate. 
If right == True the comparison + * is bins [i - 1] < x <= bins[i] or bins [i - 1] >= x > bins[i] */ static PyObject * arr_digitize(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { - /* self is not used */ - PyObject *ox, *obins; - PyArrayObject *ax = NULL, *abins = NULL, *aret = NULL; - double *dx, *dbins; - npy_intp lbins, lx; /* lengths */ - npy_intp right = 0; /* whether right or left is inclusive */ - npy_intp *iret; - int m, i; + PyObject *obj_x = NULL; + PyObject *obj_bins = NULL; + PyArrayObject *arr_x = NULL; + PyArrayObject *arr_bins = NULL; + PyObject *ret = NULL; + npy_intp len_bins; + int monotonic, right = 0; + NPY_BEGIN_THREADS_DEF + static char *kwlist[] = {"x", "bins", "right", NULL}; - PyArray_Descr *type; - char bins_non_monotonic = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, &ox, &obins, - &right)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, + &obj_x, &obj_bins, &right)) { goto fail; } - type = PyArray_DescrFromType(NPY_DOUBLE); - ax = (PyArrayObject *)PyArray_FromAny(ox, type, - 1, 1, NPY_ARRAY_CARRAY, NULL); - if (ax == NULL) { + + /* PyArray_SearchSorted will make `x` contiguous even if we don't */ + arr_x = (PyArrayObject *)PyArray_FROMANY(obj_x, NPY_DOUBLE, 0, 0, + NPY_ARRAY_CARRAY_RO); + if (arr_x == NULL) { goto fail; } - Py_INCREF(type); - abins = (PyArrayObject *)PyArray_FromAny(obins, type, - 1, 1, NPY_ARRAY_CARRAY, NULL); - if (abins == NULL) { + + /* TODO: `bins` could be strided, needs change to check_array_monotonic */ + arr_bins = (PyArrayObject *)PyArray_FROMANY(obj_bins, NPY_DOUBLE, 1, 1, + NPY_ARRAY_CARRAY_RO); + if (arr_bins == NULL) { goto fail; } - lx = PyArray_SIZE(ax); - dx = (double *)PyArray_DATA(ax); - lbins = PyArray_SIZE(abins); - dbins = (double *)PyArray_DATA(abins); - aret = (PyArrayObject *)PyArray_SimpleNew(1, &lx, NPY_INTP); - if (aret == NULL) { + len_bins = PyArray_SIZE(arr_bins); + if (len_bins == 0) { + PyErr_SetString(PyExc_ValueError, "bins must have non-zero length"); goto fail; } - iret = (npy_intp *)PyArray_DATA(aret); - if (lx <= 0 || lbins < 0) { + NPY_BEGIN_THREADS_THRESHOLDED(len_bins) + monotonic = check_array_monotonic((const double *)PyArray_DATA(arr_bins), + len_bins); + NPY_END_THREADS + + if (monotonic == 0) { PyErr_SetString(PyExc_ValueError, - "Both x and bins must have non-zero length"); - goto fail; + "bins must be monotonically increasing or decreasing"); + goto fail; } - NPY_BEGIN_ALLOW_THREADS; - if (lbins == 1) { - if (right == 0) { - for (i = 0; i < lx; i++) { - if (dx [i] >= dbins[0]) { - iret[i] = 1; - } - else { - iret[i] = 0; - } - } - } - else { - for (i = 0; i < lx; i++) { - if (dx [i] > dbins[0]) { - iret[i] = 1; - } - else { - iret[i] = 0; - } - } + /* PyArray_SearchSorted needs an increasing array */ + if (monotonic == - 1) { + PyArrayObject *arr_tmp = NULL; + npy_intp shape = PyArray_DIM(arr_bins, 0); + npy_intp stride = -PyArray_STRIDE(arr_bins, 0); + void *data = (void *)(PyArray_BYTES(arr_bins) - stride * (shape - 1)); + + arr_tmp = (PyArrayObject *)PyArray_New(&PyArray_Type, 1, &shape, + NPY_DOUBLE, &stride, data, 0, + PyArray_FLAGS(arr_bins), NULL); + if (!arr_tmp) { + goto fail; } - } - else { - m = check_array_monotonic(dbins, lbins); - if (right == 0) { - if ( m == -1 ) { - for ( i = 0; i < lx; i ++ ) { - iret [i] = decr_slot_ ((double)dx[i], dbins, lbins); - } - } - else if ( m == 1 ) { - for ( i = 0; i < lx; i ++ ) { - iret [i] = incr_slot_ ((double)dx[i], dbins, lbins); - } - } - else { - /* defer PyErr_SetString until after 
 NPY_END_ALLOW_THREADS */
-                bins_non_monotonic = 1;
-            }
-        }
-        else {
-            if ( m == -1 ) {
-                for ( i = 0; i < lx; i ++ ) {
-                    iret [i] = decr_slot_right_ ((double)dx[i], dbins,
-                                                 lbins);
-                }
-            }
-            else if ( m == 1 ) {
-                for ( i = 0; i < lx; i ++ ) {
-                    iret [i] = incr_slot_right_ ((double)dx[i], dbins,
-                                                 lbins);
-                }
-            }
-            else {
-                /* defer PyErr_SetString until after NPY_END_ALLOW_THREADS */
-                bins_non_monotonic = 1;
-            }
+        if (PyArray_SetBaseObject(arr_tmp, (PyObject *)arr_bins) < 0) {
+            Py_DECREF(arr_tmp);
+            goto fail;
         }
-    }
-    NPY_END_ALLOW_THREADS;
-    if (bins_non_monotonic) {
-        PyErr_SetString(PyExc_ValueError,
-                "The bins must be monotonically increasing or decreasing");
+        arr_bins = arr_tmp;
     }
-    Py_DECREF(ax);
-    Py_DECREF(abins);
-    return (PyObject *)aret;
-fail:
-    Py_XDECREF(ax);
-    Py_XDECREF(abins);
-    Py_XDECREF(aret);
-    return NULL;
-}
+
+    ret = PyArray_SearchSorted(arr_bins, (PyObject *)arr_x,
+                               right ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT,
+                               NULL);
+    if (!ret) {
+        goto fail;
+    }
+
+    /* If bins is decreasing, ret has bins from end, not start */
+    if (monotonic == -1) {
+        npy_intp *ret_data =
+                        (npy_intp *)PyArray_DATA((PyArrayObject *)ret);
+        npy_intp len_ret = PyArray_SIZE((PyArrayObject *)ret);
+
+        NPY_BEGIN_THREADS_THRESHOLDED(len_ret)
+        while (len_ret--) {
+            *ret_data = len_bins - *ret_data;
+            ret_data++;
+        }
+        NPY_END_THREADS
+    }
+
+    fail:
+        Py_XDECREF(arr_x);
+        Py_XDECREF(arr_bins);
+        return ret;
+}

 static char arr_insert__doc__[] = "Insert vals sequentially into equivalent 1-d positions indicated by mask.";
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 12f8bbf13..b81307a65 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -20,9 +20,11 @@ class DummyArray(object):
         self.__array_interface__ = interface
         self.base = base

-def as_strided(x, shape=None, strides=None):
+def as_strided(x, shape=None, strides=None, subok=False):
     """ Make an ndarray from the given array with the given shape and strides. """
+    # first convert input to array, possibly keeping subclass
+    x = np.array(x, copy=False, subok=subok)
     interface = dict(x.__array_interface__)
     if shape is not None:
         interface['shape'] = tuple(shape)
@@ -32,9 +34,17 @@ def as_strided(x, shape=None, strides=None):
     # Make sure dtype is correct in case of custom dtype
     if array.dtype.kind == 'V':
         array.dtype = x.dtype
+    if type(x) is not type(array):
+        # if input was an ndarray subclass and subclasses were OK,
+        # then view the result as that subclass.
+        array = array.view(type=type(x))
+        # Since we have done something akin to a view from x, we should let
+        # the subclass finalize (if it has it implemented, i.e., is not None).
+        if array.__array_finalize__:
+            array.__array_finalize__(x)
     return array
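Because `as_strided` reconstructs its result from a bare `__array_interface__` dict, subclass identity has to be restored explicitly, hence the `view` plus manual `__array_finalize__` call above. A small sketch of the visible effect (the `Tagged` class is a toy illustration, not part of numpy):

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    class Tagged(np.ndarray):
        # toy subclass: carries an attribute through views via finalize
        def __array_finalize__(self, obj):
            self.tag = getattr(obj, 'tag', 'untagged')

    a = np.arange(4.0).view(Tagged)
    a.tag = 'original'
    plain = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    print(type(plain).__name__)           # ndarray: subclass dropped by default
    kept = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    print(type(kept).__name__, kept.tag)  # Tagged original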
-def broadcast_arrays(*args):
+def broadcast_arrays(*args, **kwargs):
     """
     Broadcast any number of arrays against each other.

     Parameters
     ----------
     `*args` : array_likes
         The arrays to broadcast.

+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned arrays will be forced to be a base-class array
+        (default).
+
     Returns
     -------
     broadcasted : list of arrays
@@ -73,7 +87,11 @@ def broadcast_arrays(*args):
            [3, 3, 3]])]

     """
-    args = [np.asarray(_m) for _m in args]
+    subok = kwargs.pop('subok', False)
+    if kwargs:
+        raise TypeError('broadcast_arrays() got an unexpected keyword '
+                        'argument {!r}'.format(list(kwargs.keys())[0]))
+    args = [np.array(_m, copy=False, subok=subok) for _m in args]
     shapes = [x.shape for x in args]
     if len(set(shapes)) == 1:
         # Common case where nothing needs to be broadcasted.
@@ -118,6 +136,6 @@ def broadcast_arrays(*args):
             common_shape.append(1)

     # Construct the new arrays.
-    broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in
-                   zip(args, shapes, strides)]
+    broadcasted = [as_strided(x, shape=sh, strides=st, subok=subok)
+                   for (x, sh, st) in zip(args, shapes, strides)]
     return broadcasted
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index e83f8552e..39196f4bc 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -109,6 +109,12 @@ class TestSetOps(TestCase):
         assert_array_equal(a2, unq)
         assert_array_equal(a2_inv, inv)

+        # test for chararrays with return_inverse (gh-5099)
+        a = np.chararray(5)
+        a[...] = ''
+        a2, a2_inv = np.unique(a, return_inverse=True)
+        assert_array_equal(a2_inv, np.zeros(5))
+
     def test_intersect1d(self):
         # unique inputs
         a = np.array([5, 7, 1, 2])
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index b266f1c15..ee77386bc 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -688,28 +688,28 @@ def test_bad_header():

 def test_large_file_support():
     from nose import SkipTest
+    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
+        raise SkipTest("Unknown if Windows has sparse filesystems")
     # try creating a large sparse file
-    with tempfile.NamedTemporaryFile() as tf:
-        try:
-            # seek past end would work too, but linux truncate somewhat
-            # increases the chances that we have a sparse filesystem and can
-            # avoid actually writing 5GB
-            import subprocess as sp
-            sp.check_call(["truncate", "-s", "5368709120", tf.name])
-        except:
-            raise SkipTest("Could not create 5GB large file")
-        # write a small array to the end
-        f = open(tf.name, "wb")
+    tf_name = os.path.join(tempdir, 'sparse_file')
+    try:
+        # seek past end would work too, but linux truncate somewhat
+        # increases the chances that we have a sparse filesystem and can
+        # avoid actually writing 5GB
+        import subprocess as sp
+        sp.check_call(["truncate", "-s", "5368709120", tf_name])
+    except Exception:
+        raise SkipTest("Could not create 5GB large file")
+    # write a small array to the end
+    with open(tf_name, "wb") as f:
         f.seek(5368709120)
         d = np.arange(5)
         np.save(f, d)
-        f.close()
-        # read it back
-        f = open(tf.name, "rb")
+    # read it back
+    with open(tf_name, "rb") as f:
         f.seek(5368709120)
         r = np.load(f)
-        f.close()
-        assert_array_equal(r, d)
+    assert_array_equal(r, d)

 if __name__ == "__main__":
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index a3f805691..80faf85a6 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -124,6 +124,11 @@ class TestAverage(TestCase):
         assert_array_equal(average(y1, weights=w2, axis=1), desired)
         assert_equal(average(y1, weights=w2), 5.)
+ y3 = rand(5).astype(np.float32) + w3 = rand(5).astype(np.float64) + + assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3)) + def test_returned(self): y = np.array([[1, 2, 3], [4, 5, 6]]) @@ -312,6 +317,16 @@ class TestInsert(TestCase): np.insert([0, 1, 2], x, [3, 4, 5]) assert_equal(x, np.array([1, 1, 1])) + def test_structured_array(self): + a = np.array([(1, 'a'), (2, 'b'), (3, 'c')], + dtype=[('foo', 'i'), ('bar', 'a1')]) + val = (4, 'd') + b = np.insert(a, 0, val) + assert_array_equal(b[0], np.array(val, dtype=b.dtype)) + val = [(4, 'd')] * 2 + b = np.insert(a, [0, 2], val) + assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype)) + class TestAmax(TestCase): def test_basic(self): @@ -516,8 +531,18 @@ class TestGradient(TestCase): def test_masked(self): # Make sure that gradient supports subclasses like masked arrays - x = np.ma.array([[1, 1], [3, 4]]) - assert_equal(type(gradient(x)[0]), type(x)) + x = np.ma.array([[1, 1], [3, 4]], + mask=[[False, False], [False, False]]) + out = gradient(x)[0] + assert_equal(type(out), type(x)) + # And make sure that the output and input don't have aliased mask + # arrays + assert_(x.mask is not out.mask) + # Also check that edge_order=2 doesn't alter the original mask + x2 = np.ma.arange(5) + x2[2] = np.ma.masked + np.gradient(x2, edge_order=2) + assert_array_equal(x2.mask, [False, False, True, False, False]) def test_datetime64(self): # Make sure gradient() can handle special types like datetime64 @@ -526,7 +551,7 @@ class TestGradient(TestCase): '1910-10-12', '1910-12-12', '1912-12-12'], dtype='datetime64[D]') dx = np.array( - [-7, -3, 0, 31, 61, 396, 1066], + [-5, -3, 0, 31, 61, 396, 731], dtype='timedelta64[D]') assert_array_equal(gradient(x), dx) assert_(dx.dtype == np.dtype('timedelta64[D]')) @@ -537,7 +562,7 @@ class TestGradient(TestCase): [-5, -3, 10, 12, 61, 321, 300], dtype='timedelta64[D]') dx = np.array( - [-3, 7, 7, 25, 154, 119, -161], + [2, 7, 7, 25, 154, 119, -21], dtype='timedelta64[D]') assert_array_equal(gradient(x), dx) assert_(dx.dtype == np.dtype('timedelta64[D]')) @@ -551,7 +576,7 @@ class TestGradient(TestCase): dx = x[1] - x[0] y = 2 * x ** 3 + 4 * x ** 2 + 2 * x analytical = 6 * x ** 2 + 8 * x + 2 - num_error = np.abs((np.gradient(y, dx) / analytical) - 1) + num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1) assert_(np.all(num_error < 0.03) == True) @@ -1072,6 +1097,13 @@ class TestHistogram(TestCase): h, b = histogram(a, weights=np.ones(10, float)) assert_(issubdtype(h.dtype, float)) + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359 , -69.593948 , 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.) 
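The updated gradient expectations above reflect a behaviour change: second-order one-sided differences at the array boundaries are now opt-in via `edge_order=2` rather than always on. A quick illustration of what the flag buys on a smooth function (not taken from the test suite):

    import numpy as np

    x = np.linspace(0.0, 1.0, 11)
    y = 2 * x**3 + 4 * x**2 + 2 * x
    exact = 6 * x**2 + 8 * x + 2
    g1 = np.gradient(y, x[1] - x[0])                # first-order edges
    g2 = np.gradient(y, x[1] - x[0], edge_order=2)  # second-order edges
    # Interior points are identical; only the two boundary values differ:
    print(abs(g1 - exact).max(), abs(g2 - exact).max())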
+ def test_weights(self): v = rand(100) w = np.ones(100) * 5 @@ -1460,7 +1492,7 @@ class TestMeshgrid(TestCase): # Test that meshgrid complains about invalid arguments # Regression test for issue #4755: # https://github.com/numpy/numpy/issues/4755 - assert_raises(TypeError, meshgrid, + assert_raises(TypeError, meshgrid, [1, 2, 3], [4, 5, 6, 7], indices='ij') @@ -1587,6 +1619,9 @@ class TestInterp(TestCase): def test_exceptions(self): assert_raises(ValueError, interp, 0, [], []) assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) def test_basic(self): x = np.linspace(0, 1, 5) @@ -1627,6 +1662,16 @@ class TestInterp(TestCase): fp = np.sin(xp) assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + def compare_results(res, desired): for i in range(len(desired)): @@ -1860,6 +1905,14 @@ class TestScoreatpercentile(TestCase): np.percentile(a, [50]) assert_equal(a, np.array([2, 3, 4, 1])) + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + def test_percentile_overwrite(self): a = np.array([2, 3, 4, 1]) b = np.percentile(a, [50], overwrite_input=True) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 49ad1ba5b..4038d6a7f 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -4,9 +4,7 @@ import sys import gzip import os import threading -import shutil -import contextlib -from tempfile import mkstemp, mkdtemp, NamedTemporaryFile +from tempfile import mkstemp, NamedTemporaryFile import time import warnings import gc @@ -24,13 +22,7 @@ from numpy.ma.testutils import ( assert_raises, assert_raises_regex, run_module_suite ) from numpy.testing import assert_warns, assert_, build_err_msg - - -@contextlib.contextmanager -def tempdir(change_dir=False): - tmpdir = mkdtemp() - yield tmpdir - shutil.rmtree(tmpdir) +from numpy.testing.utils import tempdir class TextIO(BytesIO): @@ -202,7 +194,7 @@ class TestSavezLoad(RoundtripTest, TestCase): def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) - with tempdir() as tmpdir: + with tempdir(prefix="numpy_test_big_arrays_") as tmpdir: tmp = os.path.join(tmpdir, "file.npz") np.savez(tmp, a=a) del a @@ -311,7 +303,7 @@ class TestSavezLoad(RoundtripTest, TestCase): # Check that zipfile owns file and can close it. # This needs to pass a file name to load for the # test. 
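The `test_no_p_overwrite` case above pins down the reason `percentile` now takes a private copy of `q` (the `array(q, ..., copy=True)` change earlier): the percentile values are rescaled in place during the computation, so without a defensive copy the caller's array could come back modified. Roughly, assuming the 1.10-era keyword name `interpolation`:

    import numpy as np

    p = np.linspace(0.0, 100.0, num=5)
    np.percentile(np.arange(100.0), p, interpolation='midpoint')
    # `p` must be untouched afterwards:
    assert np.array_equal(p, np.linspace(0.0, 100.0, num=5))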
- with tempdir() as tmpdir: + with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir: fd, tmp = mkstemp(suffix='.npz', dir=tmpdir) os.close(fd) np.savez(tmp, lab='place holder') @@ -1093,6 +1085,21 @@ M 33 21.99 control = np.array([2009., 23., 46],) assert_equal(test, control) + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} + dtyp = [('E1','i4'),('E2','i4'),('E3','i2'),('N', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv) + control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp) + assert_equal(test, control) + dtyp = [('E1','i4'),('E2','i4'),('N', 'i1')] + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0,1,3), names=None, converters=conv) + control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp) + assert_equal(test, control) + def test_dtype_with_object(self): "Test using an explicit dtype with an object" from datetime import date @@ -1308,6 +1315,16 @@ M 33 21.99 ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) assert_equal(test, ctrl) + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + def test_withmissing_float(self): data = TextIO('A,B\n0,1.5\n2,-999.00') test = np.mafromtxt(data, dtype=None, delimiter=',', diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 3da6b5149..35ae86c20 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -645,6 +645,22 @@ class TestNanFunctions_Median(TestCase): assert_raises(IndexError, np.nanmedian, d, axis=(0, 4)) assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + def test_float_special(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('ignore', RuntimeWarning) + a = np.array([[np.inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan]) + assert_equal(np.nanmedian(a), np.inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]]) + assert_equal(np.nanmedian(a, axis=1), np.inf) + + # no mask path + a = np.array([[np.inf, np.inf], [np.inf, np.inf]]) + assert_equal(np.nanmedian(a, axis=1), np.inf) + class TestNanFunctions_Percentile(TestCase): diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 02faa0283..5c15941e6 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -153,6 +153,9 @@ class TestDocs(TestCase): assert_(p2[3] == Decimal("1.333333333333333333333333333")) assert_(p2[2] == Decimal('1.5')) assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) def test_complex(self): p = np.poly1d([3j, 2j, 1j]) @@ -173,5 +176,13 @@ class TestDocs(TestCase): except ValueError: pass + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. 
+ """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index cd0973300..bc7e30ca4 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import ( run_module_suite, assert_equal, assert_array_equal, - assert_raises + assert_raises, assert_ ) from numpy.lib.stride_tricks import as_strided, broadcast_arrays @@ -234,5 +234,49 @@ def test_as_strided(): assert_array_equal(a_view, expected) +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + kwargs['subok'] = True + return np.array(*args, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + kwargs['subok'] = True + self = np.array(*args, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py index e9dbef70f..739061a5d 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -311,6 +311,40 @@ def test_tril_triu_ndim3(): yield assert_equal, a_triu_observed.dtype, a.dtype yield assert_equal, a_tril_observed.dtype, a.dtype +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3,3), dtype='f4,f4') + 
assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + def test_mask_indices(): # simple test without offset diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 2861e1c4a..40a140b6b 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -387,7 +387,6 @@ def tri(N, M=None, k=0, dtype=float): dtype : dtype, optional Data type of the returned array. The default is float. - Returns ------- tri : ndarray of shape (N, M) @@ -452,7 +451,9 @@ def tril(m, k=0): """ m = asanyarray(m) - return multiply(tri(*m.shape[-2:], k=k, dtype=bool), m, dtype=m.dtype) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) def triu(m, k=0): @@ -478,7 +479,9 @@ def triu(m, k=0): """ m = asanyarray(m) - return multiply(~tri(*m.shape[-2:], k=k-1, dtype=bool), m, dtype=m.dtype) + mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) # Originally borrowed from John Hunter and matplotlib diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index df0052493..519d0e9b9 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -4,6 +4,7 @@ import os import sys import types import re +import warnings from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype from numpy.core import ndarray, ufunc, asarray @@ -1002,111 +1003,70 @@ class SafeEval(object): This includes strings with lists, dicts and tuples using the abstract syntax tree created by ``compiler.parse``. - For an example of usage, see `safe_eval`. + .. deprecated:: 1.10.0 See Also -------- safe_eval """ + def __init__(self): + warnings.warn("SafeEval is deprecated in 1.10 and will be removed.", + DeprecationWarning) - if sys.version_info[0] < 3: - def visit(self, node, **kw): - cls = node.__class__ - meth = getattr(self, 'visit'+cls.__name__, self.default) - return meth(node, **kw) + def visit(self, node): + cls = node.__class__ + meth = getattr(self, 'visit' + cls.__name__, self.default) + return meth(node) - def default(self, node, **kw): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) + def default(self, node): + raise SyntaxError("Unsupported source construct: %s" + % node.__class__) - def visitExpression(self, node, **kw): - for child in node.getChildNodes(): - return self.visit(child, **kw) + def visitExpression(self, node): + return self.visit(node.body) - def visitConst(self, node, **kw): - return node.value + def visitNum(self, node): + return node.n - def visitDict(self, node,**kw): - return dict( - [(self.visit(k), self.visit(v)) for k, v in node.items] - ) - - def visitTuple(self, node, **kw): - return tuple([self.visit(i) for i in node.nodes]) - - def visitList(self, node, **kw): - return [self.visit(i) for i in node.nodes] - - def visitUnaryAdd(self, node, **kw): - return +self.visit(node.getChildNodes()[0]) - - def visitUnarySub(self, node, **kw): - return -self.visit(node.getChildNodes()[0]) - - def visitName(self, node, **kw): - if node.name == 'False': - return False - elif node.name == 'True': - return True - elif node.name == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.name) - else: - - def visit(self, node): - cls = node.__class__ - meth = getattr(self, 'visit' + cls.__name__, self.default) - return meth(node) - - def default(self, node): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) - - def visitExpression(self, node): - return self.visit(node.body) - - def visitNum(self, node): - return node.n + def 
visitStr(self, node): + return node.s - def visitStr(self, node): - return node.s + def visitBytes(self, node): + return node.s - def visitBytes(self, node): - return node.s + def visitDict(self, node,**kw): + return dict([(self.visit(k), self.visit(v)) + for k, v in zip(node.keys, node.values)]) - def visitDict(self, node,**kw): - return dict([(self.visit(k), self.visit(v)) - for k, v in zip(node.keys, node.values)]) + def visitTuple(self, node): + return tuple([self.visit(i) for i in node.elts]) - def visitTuple(self, node): - return tuple([self.visit(i) for i in node.elts]) + def visitList(self, node): + return [self.visit(i) for i in node.elts] - def visitList(self, node): - return [self.visit(i) for i in node.elts] + def visitUnaryOp(self, node): + import ast + if isinstance(node.op, ast.UAdd): + return +self.visit(node.operand) + elif isinstance(node.op, ast.USub): + return -self.visit(node.operand) + else: + raise SyntaxError("Unknown unary op: %r" % node.op) + + def visitName(self, node): + if node.id == 'False': + return False + elif node.id == 'True': + return True + elif node.id == 'None': + return None + else: + raise SyntaxError("Unknown name: %s" % node.id) - def visitUnaryOp(self, node): - import ast - if isinstance(node.op, ast.UAdd): - return +self.visit(node.operand) - elif isinstance(node.op, ast.USub): - return -self.visit(node.operand) - else: - raise SyntaxError("Unknown unary op: %r" % node.op) - - def visitName(self, node): - if node.id == 'False': - return False - elif node.id == 'True': - return True - elif node.id == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.id) + def visitNameConstant(self, node): + return node.value - def visitNameConstant(self, node): - return node.value def safe_eval(source): """ @@ -1151,26 +1111,8 @@ def safe_eval(source): SyntaxError: Unsupported source construct: compiler.ast.CallFunc """ - # Local imports to speed up numpy's import time. - import warnings - - with warnings.catch_warnings(): - # compiler package is deprecated for 3.x, which is already solved - # here - warnings.simplefilter('ignore', DeprecationWarning) - try: - import compiler - except ImportError: - import ast as compiler - - walker = SafeEval() - try: - ast = compiler.parse(source, mode="eval") - except SyntaxError: - raise - try: - return walker.visit(ast) - except SyntaxError: - raise + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) #----------------------------------------------------------------------------- diff --git a/numpy/linalg/bscript b/numpy/linalg/bscript index deed4fd72..70fdd9de3 100644 --- a/numpy/linalg/bscript +++ b/numpy/linalg/bscript @@ -20,7 +20,7 @@ def pbuild(context): return context.default_builder(extension, includes=includes, **kw) - + context.register_builder("lapack_lite", build_lapack_lite) context.register_builder("_umath_linalg", build_lapack_lite) diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c index 2296cc53f..c4d2e484e 100644 --- a/numpy/linalg/lapack_lite/python_xerbla.c +++ b/numpy/linalg/lapack_lite/python_xerbla.c @@ -20,10 +20,9 @@ int xerbla_(char *srname, integer *info) { - const char* format = "On entry to %.*s" \ + static const char format[] = "On entry to %.*s" \ " parameter number %d had an illegal value"; - char buf[57 + 6 + 4]; /* 57 for strlen(format), - 6 for name, 4 for param. num. */ + char buf[sizeof(format) + 6 + 4]; /* 6 for name, 4 for param. num. 
*/ int len = 0; /* length of subroutine name*/ #ifdef WITH_THREAD diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 6b2299fe7..e35c9ac97 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -1921,7 +1921,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op): return result -def norm(x, ord=None, axis=None): +def norm(x, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. @@ -1942,6 +1942,11 @@ def norm(x, ord=None, axis=None): axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. + keepdims : bool, optional + .. versionadded:: 1.10.0 + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. Returns ------- @@ -2053,35 +2058,43 @@ def norm(x, ord=None, axis=None): # Check the default case first and handle it immediately. if ord is None and axis is None: + ndim = x.ndim x = x.ravel(order='K') if isComplexType(x.dtype.type): sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) else: sqnorm = dot(x, x) - return sqrt(sqnorm) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim*[1]) + return ret # Normalize the `axis` argument to a tuple. nd = x.ndim if axis is None: axis = tuple(range(nd)) elif not isinstance(axis, tuple): + try: + axis = int(axis) + except: + raise TypeError("'axis' must be None, an integer or a tuple of integers") axis = (axis,) if len(axis) == 1: if ord == Inf: - return abs(x).max(axis=axis) + return abs(x).max(axis=axis, keepdims=keepdims) elif ord == -Inf: - return abs(x).min(axis=axis) + return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm - return (x != 0).sum(axis=axis) + return (x != 0).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup - return add.reduce(abs(x), axis=axis) + return add.reduce(abs(x), axis=axis, keepdims=keepdims) elif ord is None or ord == 2: # special case for speedup s = (x.conj() * x).real - return sqrt(add.reduce(s, axis=axis)) + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) else: try: ord + 1 @@ -2100,7 +2113,7 @@ def norm(x, ord=None, axis=None): # if the type changed, we can safely overwrite absx abs(absx, out=absx) absx **= ord - return add.reduce(absx, axis=axis) ** (1.0 / ord) + return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord) elif len(axis) == 2: row_axis, col_axis = axis if not (-nd <= row_axis < nd and -nd <= col_axis < nd): @@ -2109,28 +2122,34 @@ def norm(x, ord=None, axis=None): if row_axis % nd == col_axis % nd: raise ValueError('Duplicate axes given.') if ord == 2: - return _multi_svd_norm(x, row_axis, col_axis, amax) + ret = _multi_svd_norm(x, row_axis, col_axis, amax) elif ord == -2: - return _multi_svd_norm(x, row_axis, col_axis, amin) + ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 - return add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) elif ord == Inf: if row_axis > col_axis: row_axis -= 1 - return add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) elif ord == -1: if col_axis > row_axis: col_axis -= 1 - return add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) elif ord == -Inf: if row_axis > 
col_axis: row_axis -= 1 - return add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) elif ord in [None, 'fro', 'f']: - return sqrt(add.reduce((x.conj() * x).real, axis=axis)) + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) else: raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret else: raise ValueError("Improper number of dimensions to norm.") diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py index 0d237c81c..6e93dae6c 100644 --- a/numpy/linalg/tests/test_build.py +++ b/numpy/linalg/tests/test_build.py @@ -1,12 +1,11 @@ from __future__ import division, absolute_import, print_function -from subprocess import call, PIPE, Popen +from subprocess import PIPE, Popen import sys import re -import numpy as np from numpy.linalg import lapack_lite -from numpy.testing import TestCase, dec +from numpy.testing import TestCase, dec, run_module_suite from numpy.compat import asbytes_nested @@ -51,3 +50,6 @@ class TestF77Mismatch(TestCase): """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to cause random crashes and wrong results. See numpy INSTALL.txt for more information.""") + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 8edf36aa6..34079bb87 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -885,6 +885,48 @@ class _TestNorm(object): expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])] assert_almost_equal(n, expected) + def test_keepdims(self): + A = np.arange(1,25, dtype=self.dt).reshape(2,3,4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None,None)) + expected_shape = (1,1,1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. + for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order,k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order,k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + def test_bad_args(self): # Check that bad arguments raise the appropriate exceptions. 
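The `keepdims` tests below exercise the new flag for both vector and matrix norms. Its main practical use is writing normalisations that broadcast without manual reshapes, for example:

    import numpy as np

    A = np.arange(1.0, 25.0).reshape(2, 3, 4)
    norms = np.linalg.norm(A, axis=-1, keepdims=True)  # shape (2, 3, 1)
    unit = A / norms                                   # broadcasts cleanly
    print(np.allclose(np.linalg.norm(unit, axis=-1), 1.0))  # True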
@@ -909,6 +951,7 @@ class _TestNorm(object): assert_raises(ValueError, norm, B, None, (2, 3)) assert_raises(ValueError, norm, B, None, (0, 1, 2)) +class TestNorm_NonSystematic(object): def test_longdouble_norm(self): # Non-regression test: p-norm of longdouble would previously raise # UnboundLocalError. diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 7dadb9d98..34e52d86e 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -845,8 +845,7 @@ class _MaskedUnaryOperation: d = getdata(a) # Case 1.1. : Domained function if self.domain is not None: - with np.errstate(): - np.seterr(divide='ignore', invalid='ignore') + with np.errstate(divide='ignore', invalid='ignore'): result = self.f(d, *args, **kwargs) # Make a mask m = ~umath.isfinite(result) @@ -934,8 +933,7 @@ class _MaskedBinaryOperation: else: m = umath.logical_or(ma, mb) # Get the result - with np.errstate(): - np.seterr(divide='ignore', invalid='ignore') + with np.errstate(divide='ignore', invalid='ignore'): result = self.f(da, db, *args, **kwargs) # check it worked if result is NotImplemented: @@ -947,11 +945,8 @@ class _MaskedBinaryOperation: return result # Case 2. : array # Revert result to da where masked - if m.any(): - np.copyto(result, 0, casting='unsafe', where=m) - # This only makes sense if the operation preserved the dtype - if result.dtype == da.dtype: - result += m * da + if m is not nomask: + np.copyto(result, da, casting='unsafe', where=m) # Transforms to a (subclass of) MaskedArray result = result.view(get_masked_subclass(a, b)) result._mask = m @@ -1075,8 +1070,7 @@ class _DomainedBinaryOperation: (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) (ma, mb) = (getmask(a), getmask(b)) # Get the result - with np.errstate(): - np.seterr(divide='ignore', invalid='ignore') + with np.errstate(divide='ignore', invalid='ignore'): result = self.f(da, db, *args, **kwargs) # check it worked if result is NotImplemented: @@ -1096,8 +1090,7 @@ class _DomainedBinaryOperation: else: return result # When the mask is True, put back da - np.copyto(result, 0, casting='unsafe', where=m) - result += m * da + np.copyto(result, da, casting='unsafe', where=m) result = result.view(get_masked_subclass(a, b)) result._mask = m if isinstance(b, MaskedArray): @@ -2792,12 +2785,50 @@ class MaskedArray(ndarray): """ # Get main attributes ......... self._update_from(obj) + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or... there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view, + # ... The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. For instance, + # most users think that np.empty_like(marr) -- which goes via this + # method -- should return a masked array with an empty mask (see + # gh-3404 and linked discussions), but others disagree, and they have + # existing code which depends on empty_like returning an array that + # matches the input mask. 
+        #
+        # Historically our algorithm was: if the template object mask had the
+        # same *number of elements* as us, then we used *its mask object
+        # itself* as our mask, so that writes to us would also write to the
+        # original array. This is horribly broken in multiple ways.
+        #
+        # Now what we do instead is, if the template object mask has the same
+        # number of elements as us, and we do not have the same base pointer
+        # as the template object (because views like arr[...] should keep the
+        # same mask), then we make a copy of the template object mask and use
+        # that. This is also horribly broken but somewhat less so. Maybe.
         if isinstance(obj, ndarray):
-            odtype = obj.dtype
-            if odtype.names:
-                _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype))
+            # XX: This looks like a bug -- shouldn't it check self.dtype
+            # instead?
+            if obj.dtype.names:
+                _mask = getattr(obj, '_mask',
+                                make_mask_none(obj.shape, obj.dtype))
             else:
                 _mask = getattr(obj, '_mask', nomask)
+
+            # If self and obj point to exactly the same data, then probably
+            # self is a simple view of obj (e.g., self = obj[...]), so they
+            # should share the same mask. (This isn't 100% reliable, e.g. self
+            # could be the first row of obj, or have strange strides, but as a
+            # heuristic it's not bad.) In all other cases, we make a copy of
+            # the mask, so that future modifications to 'self' do not end up
+            # side-effecting 'obj' as well.
+            if (obj.__array_interface__["data"][0]
+                    != self.__array_interface__["data"][0]):
+                _mask = _mask.copy()
         else:
             _mask = nomask
         self._mask = _mask
@@ -3842,8 +3873,7 @@ class MaskedArray(ndarray):
         "Raise self to the power other, in place."
         other_data = getdata(other)
         other_mask = getmask(other)
-        with np.errstate():
-            np.seterr(divide='ignore', invalid='ignore')
+        with np.errstate(divide='ignore', invalid='ignore'):
             ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
         invalid = np.logical_not(np.isfinite(self._data))
         if invalid.any():
@@ -5031,6 +5061,10 @@ class MaskedArray(ndarray):
         endwith : {True, False}, optional
             Whether missing values (if any) should be forced in the upper
             indices (at the end of the array) (True) or lower indices
             (at the beginning).
+            When the array contains unmasked values of the largest (or
+            smallest, if False) representable value of the datatype, the
+            ordering of these values and the masked values is undefined.
+            To enforce that the masked values are at the end (beginning)
+            in this case, one must sort the mask.
         fill_value : {var}, optional
             Value used internally for the masked values.
             If ``fill_value`` is not None, it supersedes ``endwith``.
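The caveat above exists because the sort filler changes in the next hunk: for float data the masked entries are now filled with `nan`, which compares greater than `inf`, so masked values really do end up last even when the array holds unmasked infinities. For example (a sketch, assuming numpy with this change applied):

    import numpy as np

    a = np.ma.array([np.inf, 1.0, 2.0], mask=[False, True, False])
    a.sort()     # endwith=True is the default
    print(a)     # [2.0 inf --]: the masked slot still lands after inf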
@@ -5079,7 +5113,11 @@ class MaskedArray(ndarray): return self if fill_value is None: if endwith: - filler = minimum_fill_value(self) + # nan > inf + if np.issubdtype(self.dtype, np.floating): + filler = np.nan + else: + filler = minimum_fill_value(self) else: filler = maximum_fill_value(self) else: @@ -5596,9 +5634,8 @@ class mvoid(MaskedArray): """ # def __new__(self, data, mask=nomask, dtype=None, fill_value=None, - hardmask=False): - dtype = dtype or data.dtype - _data = np.array(data, dtype=dtype) + hardmask=False, copy=False, subok=True): + _data = np.array(data, copy=copy, subok=subok, dtype=dtype) _data = _data.view(self) _data._hardmask = hardmask if mask is not nomask: @@ -6118,8 +6155,7 @@ def power(a, b, third=None): else: basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray - with np.errstate(): - np.seterr(divide='ignore', invalid='ignore') + with np.errstate(divide='ignore', invalid='ignore'): result = np.where(m, fa, umath.power(fa, fb)).view(basetype) result._update_from(a) # Find where we're in trouble w/ NaNs and Infs @@ -6190,7 +6226,11 @@ def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=Non axis = 0 if fill_value is None: if endwith: - filler = minimum_fill_value(a) + # nan > inf + if np.issubdtype(a.dtype, np.floating): + filler = np.nan + else: + filler = minimum_fill_value(a) else: filler = maximum_fill_value(a) else: diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index a993fd05d..1849df72b 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -671,8 +671,8 @@ def median(a, axis=None, out=None, overwrite_input=False): """ if not hasattr(a, 'mask') or np.count_nonzero(a.mask) == 0: - return masked_array(np.median(a, axis=axis, out=out, - overwrite_input=overwrite_input), copy=False) + return masked_array(np.median(getattr(a, 'data', a), axis=axis, + out=out, overwrite_input=overwrite_input), copy=False) if overwrite_input: if axis is None: asorted = a.ravel() @@ -705,7 +705,14 @@ def median(a, axis=None, out=None, overwrite_input=False): low = high else: low[odd] = high[odd] - return np.ma.mean([low, high], axis=0, out=out) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum([low, high], axis=0, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + else: + s = np.ma.mean([low, high], axis=0, out=out) + return s #.............................................................................. diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e6f659041..4ac3465aa 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -194,8 +194,7 @@ class TestMaskedArray(TestCase): def test_fix_invalid(self): # Checks fix_invalid. 
- with np.errstate(): - np.seterr(invalid='ignore') + with np.errstate(invalid='ignore'): data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) data_fixed = fix_invalid(data) assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) @@ -815,7 +814,7 @@ class TestMaskedArrayArithmetic(TestCase): res = count(ott) self.assertTrue(res.dtype.type is np.intp) assert_equal(3, res) - + ott = ott.reshape((2, 2)) res = count(ott) assert_(res.dtype.type is np.intp) @@ -2227,6 +2226,13 @@ class TestMaskedArrayMethods(TestCase): assert_equal(b.shape, a.shape) assert_equal(b.fill_value, a.fill_value) + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + def test_put(self): # Tests put. d = arange(5) @@ -3523,8 +3529,15 @@ class TestMaskedFields(TestCase): assert_equal_records(a[-2]._mask, a._mask[-2]) def test_setitem(self): - # Issue 2403 + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 mdtype = np.dtype([('a', bool), ('b', bool)]) # soft mask control = np.array([(False, True), (True, True)], dtype=mdtype) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 6ce1dc346..d7bc765a9 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -504,6 +504,9 @@ class TestApplyOverAxes(TestCase): class TestMedian(TestCase): + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) def test_2d(self): # Tests median w/ 2D diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 87c2133d7..047f91c77 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -607,8 +607,7 @@ class TestMa(TestCase): def test_testScalarArithmetic(self): xm = array(0, mask=1) #TODO FIXME: Find out what the following raises a warning in r8247 - with np.errstate(): - np.seterr(divide='ignore') + with np.errstate(divide='ignore'): self.assertTrue((1 / array(0)).mask) self.assertTrue((1 + xm).mask) self.assertTrue((-xm).mask) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 0fd5db66a..d0b39ad9f 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -454,6 +454,96 @@ class matrix(N.ndarray): """ return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the single-dimensional entries in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. 
+ Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. + + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A'}, optional + Whether to flatten in C (row-major), Fortran (column-major) order, + or preserve the C/Fortran ordering from `m`. + The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + def mean(self, axis=None, dtype=None, out=None): """ Returns the average of the matrix elements along the given axis. @@ -925,6 +1015,46 @@ class matrix(N.ndarray): """ return self.__array__().ravel() + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + def getT(self): """ Returns the transpose of the matrix. @@ -1031,6 +1161,12 @@ def bmat(obj, ldict=None, gdict=None): obj : str or array_like Input data. Names of variables in the current scope may be referenced, even if `obj` is a string. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is `None`. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. 
Returns ------- diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index a06a564aa..d2a89bd51 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -386,6 +386,7 @@ class TestNewScalarIndexing(TestCase): assert_array_equal(x[:, [1, 0]], x[:, ::-1]) assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + class TestPower(TestCase): def test_returntype(self): a = array([[0, 1], [0, 0]]) @@ -396,5 +397,51 @@ class TestPower(TestCase): def test_list(self): assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + +class TestShape(TestCase): + def setUp(self): + self.a = array([[1], [2]]) + self.m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + for t in array, matrix: + x = t([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, self.m.flatten())) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py index fc5b1df17..64d63f125 100644 --- a/numpy/matrixlib/tests/test_multiarray.py +++ b/numpy/matrixlib/tests/test_multiarray.py @@ -16,3 +16,6 @@ class TestView(TestCase): assert_(isinstance(y, np.matrix)) assert_equal(y.dtype, np.dtype('<i2')) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py index fa88f5288..3588de5e6 100644 --- a/numpy/matrixlib/tests/test_numeric.py +++ b/numpy/matrixlib/tests/test_numeric.py @@ -1,6 +1,6 @@ from __future__ import division, absolute_import, print_function -from numpy.testing import assert_equal, TestCase +from numpy.testing import assert_equal, TestCase, run_module_suite from numpy.core import ones from numpy import matrix @@ -8,3 +8,6 @@ class TestDot(TestCase): def test_matscalar(self): b1 = matrix(ones((3, 3), dtype=complex)) assert_equal(b1*1.0, b1) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py index 4bbd44dcf..119b21d8a 100644 --- a/numpy/matrixlib/tests/test_regression.py +++ b/numpy/matrixlib/tests/test_regression.py @@ -32,3 +32,6 @@ class TestRegression(TestCase): x = np.asmatrix(np.random.uniform(0, 1, (3, 3))) 
self.assertEqual(x.std().shape, ()) self.assertEqual(x.argmax().shape, ()) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index e9ca387c3..1200d1c8d 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -15,8 +15,6 @@ information can be found in the docstring for the module of interest. """ from __future__ import division, absolute_import, print_function -import warnings - from .polynomial import Polynomial from .chebyshev import Chebyshev from .legendre import Legendre diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 23608c74a..234b509aa 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -374,7 +374,7 @@ class ABCPolyBase(object): return quo, rem def __pow__(self, other): - coef = self._pow(self.coef, other, maxpower = self.maxpower) + coef = self._pow(self.coef, other, maxpower=self.maxpower) res = self.__class__(coef, self.domain, self.window) return res @@ -721,8 +721,6 @@ class ABCPolyBase(object): y = self(x) return x, y - - @classmethod def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, window=None): diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index b4acbbeab..f213ab3fd 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -94,13 +94,14 @@ import numpy.linalg as la from . import polyutils as pu from ._polybase import ABCPolyBase -__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', - 'chebadd', 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', - 'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', - 'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots', - 'chebpts1', 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', - 'chebgrid2d', 'chebgrid3d', 'chebvander2d', 'chebvander3d', - 'chebcompanion', 'chebgauss', 'chebweight'] +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight'] chebtrim = pu.trimcoef @@ -109,7 +110,7 @@ chebtrim = pu.trimcoef # functions and do minimal error checking. # -def _cseries_to_zseries(c) : +def _cseries_to_zseries(c): """Covert Chebyshev series to z-series. Covert a Chebyshev series to the equivalent z-series. The result is @@ -134,7 +135,7 @@ def _cseries_to_zseries(c) : return zs + zs[::-1] -def _zseries_to_cseries(zs) : +def _zseries_to_cseries(zs): """Covert z-series to a Chebyshev series. Covert a z series to the equivalent Chebyshev series. The result is @@ -159,7 +160,7 @@ def _zseries_to_cseries(zs) : return c -def _zseries_mul(z1, z2) : +def _zseries_mul(z1, z2): """Multiply two z-series. Multiply two z-series to produce a z-series. @@ -186,7 +187,7 @@ def _zseries_mul(z1, z2) : return np.convolve(z1, z2) -def _zseries_div(z1, z2) : +def _zseries_div(z1, z2): """Divide the first z-series by the second. Divide `z1` by `z2` and return the quotient and remainder as z-series. 
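[Editor's note] The private z-series helpers touched in the hunks above implement Chebyshev arithmetic by mapping a length-n coefficient vector onto a symmetric z-series, in which multiplication becomes a plain convolution. A minimal sketch of the identity this machinery relies on, checked through the public API (T_1*T_1 = (T_0 + T_2)/2):

    import numpy as np
    from numpy.polynomial import chebyshev as C

    # Multiplying T_1 by itself must reproject onto T_0 and T_2.
    prod = C.chebmul([0, 1], [0, 1])
    assert np.allclose(prod, [0.5, 0.0, 0.5])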
@@ -223,19 +224,19 @@ def _zseries_div(z1, z2) : z2 = z2.copy() len1 = len(z1) len2 = len(z2) - if len2 == 1 : + if len2 == 1: z1 /= z2 return z1, z1[:1]*0 - elif len1 < len2 : + elif len1 < len2: return z1[:1]*0, z1 - else : + else: dlen = len1 - len2 scl = z2[0] z2 /= scl quo = np.empty(dlen + 1, dtype=z1.dtype) i = 0 j = dlen - while i < j : + while i < j: r = z1[i] quo[i] = z1[i] quo[dlen - i] = r @@ -253,7 +254,7 @@ def _zseries_div(z1, z2) : return quo, rem -def _zseries_der(zs) : +def _zseries_der(zs): """Differentiate a z-series. The derivative is with respect to x, not z. This is achieved using the @@ -285,7 +286,7 @@ def _zseries_der(zs) : return d -def _zseries_int(zs) : +def _zseries_int(zs): """Integrate a z-series. The integral is with respect to x, not z. This is achieved by a change @@ -323,7 +324,7 @@ def _zseries_int(zs) : # -def poly2cheb(pol) : +def poly2cheb(pol): """ Convert a polynomial to a Chebyshev series. @@ -368,12 +369,12 @@ def poly2cheb(pol) : [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 - for i in range(deg, -1, -1) : + for i in range(deg, -1, -1): res = chebadd(chebmulx(res), pol[i]) return res -def cheb2poly(c) : +def cheb2poly(c): """ Convert a Chebyshev series to a polynomial. @@ -427,7 +428,7 @@ def cheb2poly(c) : c0 = c[-2] c1 = c[-1] # i is the current degree of c1 - for i in range(n - 1, 1, -1) : + for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1) c1 = polyadd(tmp, polymulx(c1)*2) @@ -452,7 +453,7 @@ chebone = np.array([1]) chebx = np.array([0, 1]) -def chebline(off, scl) : +def chebline(off, scl): """ Chebyshev series whose graph is a straight line. @@ -482,13 +483,13 @@ def chebline(off, scl) : -3.0 """ - if scl != 0 : + if scl != 0: return np.array([off, scl]) - else : + else: return np.array([off]) -def chebfromroots(roots) : +def chebfromroots(roots): """ Generate a Chebyshev series with given roots. @@ -537,9 +538,9 @@ def chebfromroots(roots) : array([ 1.5+0.j, 0.0+0.j, 0.5+0.j]) """ - if len(roots) == 0 : + if len(roots) == 0: return np.ones(1) - else : + else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [chebline(-r, 1) for r in roots] @@ -595,10 +596,10 @@ def chebadd(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 - else : + else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) @@ -647,10 +648,10 @@ def chebsub(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 - else : + else: c2 = -c2 c2[:c1.size] += c1 ret = c2 @@ -794,16 +795,16 @@ def chebdiv(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : + if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) - if lc1 < lc2 : + if lc1 < lc2: return c1[:1]*0, c1 - elif lc2 == 1 : + elif lc2 == 1: return c1/c2[-1], c1[:1]*0 - else : + else: z1 = _cseries_to_zseries(c1) z2 = _cseries_to_zseries(c2) quo, rem = _zseries_div(z1, z2) @@ -812,7 +813,7 @@ def chebdiv(c1, c2): return quo, rem -def chebpow(c, pow, maxpower=16) : +def chebpow(c, pow, maxpower=16): """Raise a Chebyshev series to a power. Returns the Chebyshev series `c` raised to the power `pow`. 
The @@ -846,25 +847,25 @@ def chebpow(c, pow, maxpower=16) : # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) - if power != pow or power < 0 : + if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : + elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") - elif power == 0 : + elif power == 0: return np.array([1], dtype=c.dtype) - elif power == 1 : + elif power == 1: return c - else : + else: # This can be made more efficient by using powers of two # in the usual way. zs = _cseries_to_zseries(c) prd = zs - for i in range(2, power + 1) : + for i in range(2, power + 1): prd = np.convolve(prd, zs) return _zseries_to_cseries(prd) -def chebder(c, m=1, scl=1, axis=0) : +def chebder(c, m=1, scl=1, axis=0): """ Differentiate a Chebyshev series. @@ -1057,9 +1058,9 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if cnt != m: raise ValueError("The order of integration must be integer") - if cnt < 0 : + if cnt < 0: raise ValueError("The order of integration must be non-negative") - if len(k) > cnt : + if len(k) > cnt: raise ValueError("Too many integration constants") if iaxis != axis: raise ValueError("The axis must be integer") @@ -1073,7 +1074,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = np.rollaxis(c, iaxis) k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : + for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): @@ -1162,19 +1163,19 @@ def chebval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,)*x.ndim) - if len(c) == 1 : + if len(c) == 1: c0 = c[0] c1 = 0 - elif len(c) == 2 : + elif len(c) == 2: c0 = c[0] c1 = c[1] - else : + else: x2 = 2*x c0 = c[-2] c1 = c[-1] - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 c0 = c[-i] - c1 c1 = tmp + c1*x2 @@ -1410,7 +1411,7 @@ def chebgrid3d(x, y, z, c): return c -def chebvander(x, deg) : +def chebvander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points @@ -1457,15 +1458,15 @@ def chebvander(x, deg) : v = np.empty(dims, dtype=dtyp) # Use forward recursion to generate the entries. v[0] = x*0 + 1 - if ideg > 0 : + if ideg > 0: x2 = 2*x v[1] = x - for i in range(2, ideg + 1) : + for i in range(2, ideg + 1): v[i] = v[i-1]*x2 - v[i-2] return np.rollaxis(v, 0, v.ndim) -def chebvander2d(x, y, deg) : +def chebvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1528,7 +1529,7 @@ def chebvander2d(x, y, deg) : return v.reshape(v.shape[:-2] + (-1,)) -def chebvander3d(x, y, z, deg) : +def chebvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1714,13 +1715,13 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): y = np.asarray(y) + 0.0 # check arguments. 
- if deg < 0 : + if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2 : + if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") @@ -1740,7 +1741,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): rhs = rhs * w # set rcond - if rcond is None : + if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. @@ -1759,9 +1760,9 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) - if full : + if full: return c, [resids, rank, s, rcond] - else : + else: return c @@ -1916,8 +1917,8 @@ def chebweight(x): The weight function of the Chebyshev polynomials. The weight function is :math:`1/\sqrt{1 - x^2}` and the interval of - integration is :math:`[-1, 1]`. The Chebyshev polynomials are orthogonal, but - not normalized, with respect to this weight function. + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. Parameters ---------- diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 43ede58ac..1d3bef390 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -66,18 +66,18 @@ import numpy.linalg as la from . import polyutils as pu from ._polybase import ABCPolyBase -__all__ = ['hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', - 'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', - 'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm', - 'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots', - 'Hermite', 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', - 'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss', - 'hermweight'] +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] hermtrim = pu.trimcoef -def poly2herm(pol) : +def poly2herm(pol): """ poly2herm(pol) @@ -118,12 +118,12 @@ def poly2herm(pol) : [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 - for i in range(deg, -1, -1) : + for i in range(deg, -1, -1): res = hermadd(hermmulx(res), pol[i]) return res -def herm2poly(c) : +def herm2poly(c): """ Convert a Hermite series to a polynomial. @@ -174,7 +174,7 @@ def herm2poly(c) : c0 = c[-2] c1 = c[-1] # i is the current degree of c1 - for i in range(n - 1, 1, -1) : + for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1*(2*(i - 1))) c1 = polyadd(tmp, polymulx(c1)*2) @@ -198,7 +198,7 @@ hermone = np.array([1]) hermx = np.array([0, 1/2]) -def hermline(off, scl) : +def hermline(off, scl): """ Hermite series whose graph is a straight line. @@ -228,13 +228,13 @@ def hermline(off, scl) : 5.0 """ - if scl != 0 : + if scl != 0: return np.array([off, scl/2]) - else : + else: return np.array([off]) -def hermfromroots(roots) : +def hermfromroots(roots): """ Generate a Hermite series with given roots. 
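[Editor's note] Since poly2herm and herm2poly above are exact inverses of each other, a round-trip check makes a quick smoke test; a minimal sketch using the public hermite module:

    import numpy as np
    from numpy.polynomial import hermite as H

    pol = np.array([1.0, 2.0, 3.0])            # 1 + 2x + 3x**2
    c = H.poly2herm(pol)                       # same polynomial in the H_i basis
    assert np.allclose(H.herm2poly(c), pol)    # round trip recovers the input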
@@ -284,9 +284,9 @@ def hermfromroots(roots) : array([ 0.+0.j, 0.+0.j]) """ - if len(roots) == 0 : + if len(roots) == 0: return np.ones(1) - else : + else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [hermline(-r, 1) for r in roots] @@ -340,10 +340,10 @@ def hermadd(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 - else : + else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) @@ -388,10 +388,10 @@ def hermsub(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 - else : + else: c2 = -c2 c2[:c1.size] += c1 ret = c2 @@ -501,13 +501,13 @@ def hermmul(c1, c2): elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs - else : + else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) c1 = hermadd(tmp, hermmulx(c1)*2) return hermadd(c0, hermmulx(c1)*2) @@ -560,16 +560,16 @@ def hermdiv(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : + if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) - if lc1 < lc2 : + if lc1 < lc2: return c1[:1]*0, c1 - elif lc2 == 1 : + elif lc2 == 1: return c1/c2[-1], c1[:1]*0 - else : + else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): @@ -580,7 +580,7 @@ def hermdiv(c1, c2): return quo, pu.trimseq(rem) -def hermpow(c, pow, maxpower=16) : +def hermpow(c, pow, maxpower=16): """Raise a Hermite series to a power. Returns the Hermite series `c` raised to the power `pow`. The @@ -617,24 +617,24 @@ def hermpow(c, pow, maxpower=16) : # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) - if power != pow or power < 0 : + if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : + elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") - elif power == 0 : + elif power == 0: return np.array([1], dtype=c.dtype) - elif power == 1 : + elif power == 1: return c - else : + else: # This can be made more efficient by using powers of two # in the usual way. prd = c - for i in range(2, power + 1) : + for i in range(2, power + 1): prd = hermmul(prd, c) return prd -def hermder(c, m=1, scl=1, axis=0) : +def hermder(c, m=1, scl=1, axis=0): """ Differentiate a Hermite series. 
@@ -712,7 +712,7 @@ def hermder(c, m=1, scl=1, axis=0) : n = len(c) if cnt >= n: c = c[:1]*0 - else : + else: for i in range(cnt): n = n - 1 c *= scl @@ -816,9 +816,9 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if cnt != m: raise ValueError("The order of integration must be integer") - if cnt < 0 : + if cnt < 0: raise ValueError("The order of integration must be non-negative") - if len(k) > cnt : + if len(k) > cnt: raise ValueError("Too many integration constants") if iaxis != axis: raise ValueError("The axis must be integer") @@ -832,7 +832,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = np.rollaxis(c, iaxis) k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : + for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): @@ -924,22 +924,22 @@ def hermval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,)*x.ndim) x2 = x*2 - if len(c) == 1 : + if len(c) == 1: c0 = c[0] c1 = 0 - elif len(c) == 2 : + elif len(c) == 2: c0 = c[0] c1 = c[1] - else : + else: nd = len(c) c0 = c[-2] c1 = c[-1] - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = c[-i] - c1*(2*(nd - 1)) c1 = tmp + c1*x2 return c0 + c1*x2 @@ -1174,7 +1174,7 @@ def hermgrid3d(x, y, z, c): return c -def hermvander(x, deg) : +def hermvander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points @@ -1229,15 +1229,15 @@ def hermvander(x, deg) : dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 - if ideg > 0 : + if ideg > 0: x2 = x*2 v[1] = x2 - for i in range(2, ideg + 1) : + for i in range(2, ideg + 1): v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) return np.rollaxis(v, 0, v.ndim) -def hermvander2d(x, y, deg) : +def hermvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1300,7 +1300,7 @@ def hermvander2d(x, y, deg) : return v.reshape(v.shape[:-2] + (-1,)) -def hermvander3d(x, y, z, deg) : +def hermvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1491,13 +1491,13 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): y = np.asarray(y) + 0.0 # check arguments. - if deg < 0 : + if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2 : + if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") @@ -1517,7 +1517,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): rhs = rhs * w # set rcond - if rcond is None : + if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. @@ -1536,9 +1536,9 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) - if full : + if full: return c, [resids, rank, s, rcond] - else : + else: return c @@ -1568,7 +1568,6 @@ def hermcompanion(c): .. 
versionadded::1.7.0 """ - accprod = np.multiply.accumulate # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: @@ -1578,13 +1577,13 @@ def hermcompanion(c): n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., np.sqrt(2.*np.arange(1, n)))) - scl = np.multiply.accumulate(scl) + scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] top[...] = np.sqrt(.5*np.arange(1, n)) bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) return mat @@ -1636,9 +1635,9 @@ def hermroots(c): """ # c is a trimmed copy [c] = pu.as_series([c]) - if len(c) <= 1 : + if len(c) <= 1: return np.array([], dtype=c.dtype) - if len(c) == 2 : + if len(c) == 2: return np.array([-.5*c[0]/c[1]]) m = hermcompanion(c) @@ -1647,6 +1646,49 @@ def hermroots(c): return r +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi)) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(2./nd) + nd = nd - 1.0 + return c0 + c1*x*np.sqrt(2) + + def hermgauss(deg): """ Gauss-Hermite quadrature. @@ -1689,22 +1731,21 @@ def hermgauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0]*deg + [1], dtype=np.float64) m = hermcompanion(c) - x = la.eigvals(m) + x = la.eigvalsh(m) x.sort() # improve roots by one application of Newton - dy = hermval(x, c) - df = hermval(x, hermder(c)) + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) x -= dy/df # compute the weights. We scale the factor to avoid possible numerical # overflow. - fm = hermval(x, c[1:]) + fm = _normed_hermite_n(x, ideg - 1) fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) + w = 1/(fm * fm) # for Hermite we can also symmetrize w = (w + w[::-1])/2 diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 874b42470..fce13a84e 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -66,18 +66,19 @@ import numpy.linalg as la from . 
import polyutils as pu from ._polybase import ABCPolyBase -__all__ = ['hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', - 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermpow', - 'hermeval', - 'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots', - 'hermevander', 'hermefit', 'hermetrim', 'hermeroots', 'HermiteE', - 'hermeval2d', 'hermeval3d', 'hermegrid2d', 'hermegrid3d', 'hermevander2d', - 'hermevander3d', 'hermecompanion', 'hermegauss', 'hermeweight'] +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] hermetrim = pu.trimcoef -def poly2herme(pol) : +def poly2herme(pol): """ poly2herme(pol) @@ -118,12 +119,12 @@ def poly2herme(pol) : [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 - for i in range(deg, -1, -1) : + for i in range(deg, -1, -1): res = hermeadd(hermemulx(res), pol[i]) return res -def herme2poly(c) : +def herme2poly(c): """ Convert a Hermite series to a polynomial. @@ -173,7 +174,7 @@ def herme2poly(c) : c0 = c[-2] c1 = c[-1] # i is the current degree of c1 - for i in range(n - 1, 1, -1) : + for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1*(i - 1)) c1 = polyadd(tmp, polymulx(c1)) @@ -197,7 +198,7 @@ hermeone = np.array([1]) hermex = np.array([0, 1]) -def hermeline(off, scl) : +def hermeline(off, scl): """ Hermite series whose graph is a straight line. @@ -228,13 +229,13 @@ def hermeline(off, scl) : 5.0 """ - if scl != 0 : + if scl != 0: return np.array([off, scl]) - else : + else: return np.array([off]) -def hermefromroots(roots) : +def hermefromroots(roots): """ Generate a HermiteE series with given roots. 
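[Editor's note] The __all__ reflow at the top of hermite_e.py is not purely cosmetic: the old list exported the misspelled name 'hermpow' where 'hermepow' was intended. As a small illustration of the module's conventions, hermeline(off, scl) represents off + scl*x, and because He_0 = 1 and He_1 = x the coefficients are stored as-is; a minimal sketch:

    import numpy as np
    from numpy.polynomial import hermite_e as He

    line = He.hermeline(3, -2)                     # represents 3 - 2x
    assert np.allclose(line, [3, -2])
    assert np.allclose(He.hermeval(5, line), 3 - 2*5)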
@@ -284,9 +285,9 @@ def hermefromroots(roots) : array([ 0.+0.j, 0.+0.j]) """ - if len(roots) == 0 : + if len(roots) == 0: return np.ones(1) - else : + else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [hermeline(-r, 1) for r in roots] @@ -340,10 +341,10 @@ def hermeadd(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 - else : + else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) @@ -388,10 +389,10 @@ def hermesub(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 - else : + else: c2 = -c2 c2[:c1.size] += c1 ret = c2 @@ -501,13 +502,13 @@ def hermemul(c1, c2): elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs - else : + else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = hermesub(c[-i]*xs, c1*(nd - 1)) c1 = hermeadd(tmp, hermemulx(c1)) return hermeadd(c0, hermemulx(c1)) @@ -558,16 +559,16 @@ def hermediv(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : + if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) - if lc1 < lc2 : + if lc1 < lc2: return c1[:1]*0, c1 - elif lc2 == 1 : + elif lc2 == 1: return c1/c2[-1], c1[:1]*0 - else : + else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): @@ -578,7 +579,7 @@ def hermediv(c1, c2): return quo, pu.trimseq(rem) -def hermepow(c, pow, maxpower=16) : +def hermepow(c, pow, maxpower=16): """Raise a Hermite series to a power. Returns the Hermite series `c` raised to the power `pow`. The @@ -615,24 +616,24 @@ def hermepow(c, pow, maxpower=16) : # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) - if power != pow or power < 0 : + if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : + elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") - elif power == 0 : + elif power == 0: return np.array([1], dtype=c.dtype) - elif power == 1 : + elif power == 1: return c - else : + else: # This can be made more efficient by using powers of two # in the usual way. prd = c - for i in range(2, power + 1) : + for i in range(2, power + 1): prd = hermemul(prd, c) return prd -def hermeder(c, m=1, scl=1, axis=0) : +def hermeder(c, m=1, scl=1, axis=0): """ Differentiate a Hermite_e series. 
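[Editor's note] hermepow, reformatted above, is documented as repeated multiplication guarded by maxpower; the equivalence is easy to spot-check:

    import numpy as np
    from numpy.polynomial import hermite_e as He

    c = np.array([1.0, 2.0])
    # Cubing via hermepow should match two explicit multiplications.
    assert np.allclose(He.hermepow(c, 3), He.hermemul(He.hermemul(c, c), c))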
@@ -710,7 +711,7 @@ def hermeder(c, m=1, scl=1, axis=0) : n = len(c) if cnt >= n: return c[:1]*0 - else : + else: for i in range(cnt): n = n - 1 c *= scl @@ -814,9 +815,9 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if cnt != m: raise ValueError("The order of integration must be integer") - if cnt < 0 : + if cnt < 0: raise ValueError("The order of integration must be non-negative") - if len(k) > cnt : + if len(k) > cnt: raise ValueError("Too many integration constants") if iaxis != axis: raise ValueError("The axis must be integer") @@ -830,7 +831,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = np.rollaxis(c, iaxis) k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : + for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): @@ -922,21 +923,21 @@ def hermeval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,)*x.ndim) - if len(c) == 1 : + if len(c) == 1: c0 = c[0] c1 = 0 - elif len(c) == 2 : + elif len(c) == 2: c0 = c[0] c1 = c[1] - else : + else: nd = len(c) c0 = c[-2] c1 = c[-1] - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = c[-i] - c1*(nd - 1) c1 = tmp + c1*x return c0 + c1*x @@ -1171,7 +1172,7 @@ def hermegrid3d(x, y, z, c): return c -def hermevander(x, deg) : +def hermevander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points @@ -1226,14 +1227,14 @@ def hermevander(x, deg) : dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 - if ideg > 0 : + if ideg > 0: v[1] = x - for i in range(2, ideg + 1) : + for i in range(2, ideg + 1): v[i] = (v[i-1]*x - v[i-2]*(i - 1)) return np.rollaxis(v, 0, v.ndim) -def hermevander2d(x, y, deg) : +def hermevander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1296,7 +1297,7 @@ def hermevander2d(x, y, deg) : return v.reshape(v.shape[:-2] + (-1,)) -def hermevander3d(x, y, z, deg) : +def hermevander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1487,13 +1488,13 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): y = np.asarray(y) + 0.0 # check arguments. - if deg < 0 : + if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2 : + if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") @@ -1513,7 +1514,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): rhs = rhs * w # set rcond - if rcond is None : + if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. @@ -1532,9 +1533,9 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) - if full : + if full: return c, [resids, rank, s, rcond] - else : + else: return c @@ -1565,7 +1566,6 @@ def hermecompanion(c): .. 
versionadded::1.7.0 """ - accprod = np.multiply.accumulate # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: @@ -1575,13 +1575,13 @@ def hermecompanion(c): n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., np.sqrt(np.arange(1, n)))) - scl = np.multiply.accumulate(scl) + scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] top[...] = np.sqrt(np.arange(1, n)) bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1]) + mat[:, -1] -= scl*c[:-1]/c[-1] return mat @@ -1633,9 +1633,9 @@ def hermeroots(c): """ # c is a trimmed copy [c] = pu.as_series([c]) - if len(c) <= 1 : + if len(c) <= 1: return np.array([], dtype=c.dtype) - if len(c) == 2 : + if len(c) == 2: return np.array([-c[0]/c[1]]) m = hermecompanion(c) @@ -1644,6 +1644,49 @@ def hermeroots(c): return r +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi)) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(2*np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(1./nd) + nd = nd - 1.0 + return c0 + c1*x + + def hermegauss(deg): """ Gauss-HermiteE quadrature. @@ -1688,20 +1731,19 @@ def hermegauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0]*deg + [1]) m = hermecompanion(c) - x = la.eigvals(m) + x = la.eigvalsh(m) x.sort() # improve roots by one application of Newton - dy = hermeval(x, c) - df = hermeval(x, hermeder(c)) + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) x -= dy/df # compute the weights. We scale the factor to avoid possible numerical # overflow. - fm = hermeval(x, c[1:]) + fm = _normed_hermite_e_n(x, ideg - 1) fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) + w = 1/(fm * fm) # for Hermite_e we can also symmetrize w = (w + w[::-1])/2 diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 9d88162ce..8d2705d5d 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -66,17 +66,18 @@ import numpy.linalg as la from . 
import polyutils as pu from ._polybase import ABCPolyBase -__all__ = ['lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', - 'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', - 'lagval', 'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', - 'lagvander', 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', - 'lagval3d', 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', - 'lagcompanion', 'laggauss', 'lagweight'] +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] lagtrim = pu.trimcoef -def poly2lag(pol) : +def poly2lag(pol): """ poly2lag(pol) @@ -117,12 +118,12 @@ def poly2lag(pol) : [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 - for i in range(deg, -1, -1) : + for i in range(deg, -1, -1): res = lagadd(lagmulx(res), pol[i]) return res -def lag2poly(c) : +def lag2poly(c): """ Convert a Laguerre series to a polynomial. @@ -194,7 +195,7 @@ lagone = np.array([1]) lagx = np.array([1, -1]) -def lagline(off, scl) : +def lagline(off, scl): """ Laguerre series whose graph is a straight line. @@ -224,13 +225,13 @@ def lagline(off, scl) : 5.0 """ - if scl != 0 : + if scl != 0: return np.array([off + scl, -scl]) - else : + else: return np.array([off]) -def lagfromroots(roots) : +def lagfromroots(roots): """ Generate a Laguerre series with given roots. @@ -280,9 +281,9 @@ def lagfromroots(roots) : array([ 0.+0.j, 0.+0.j]) """ - if len(roots) == 0 : + if len(roots) == 0: return np.ones(1) - else : + else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [lagline(-r, 1) for r in roots] @@ -337,10 +338,10 @@ def lagadd(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 - else : + else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) @@ -385,10 +386,10 @@ def lagsub(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 - else : + else: c2 = -c2 c2[:c1.size] += c1 ret = c2 @@ -499,13 +500,13 @@ def lagmul(c1, c2): elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs - else : + else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) return lagadd(c0, lagsub(c1, lagmulx(c1))) @@ -556,16 +557,16 @@ def lagdiv(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : + if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) - if lc1 < lc2 : + if lc1 < lc2: return c1[:1]*0, c1 - elif lc2 == 1 : + elif lc2 == 1: return c1/c2[-1], c1[:1]*0 - else : + else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): @@ -576,7 +577,7 @@ def lagdiv(c1, c2): return quo, pu.trimseq(rem) -def lagpow(c, pow, maxpower=16) : +def lagpow(c, pow, maxpower=16): """Raise a Laguerre series to a power. Returns the Laguerre series `c` raised to the power `pow`. 
The @@ -613,24 +614,24 @@ def lagpow(c, pow, maxpower=16) : # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) - if power != pow or power < 0 : + if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : + elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") - elif power == 0 : + elif power == 0: return np.array([1], dtype=c.dtype) - elif power == 1 : + elif power == 1: return c - else : + else: # This can be made more efficient by using powers of two # in the usual way. prd = c - for i in range(2, power + 1) : + for i in range(2, power + 1): prd = lagmul(prd, c) return prd -def lagder(c, m=1, scl=1, axis=0) : +def lagder(c, m=1, scl=1, axis=0): """ Differentiate a Laguerre series. @@ -708,7 +709,7 @@ def lagder(c, m=1, scl=1, axis=0) : n = len(c) if cnt >= n: c = c[:1]*0 - else : + else: for i in range(cnt): n = n - 1 c *= scl @@ -815,9 +816,9 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if cnt != m: raise ValueError("The order of integration must be integer") - if cnt < 0 : + if cnt < 0: raise ValueError("The order of integration must be non-negative") - if len(k) > cnt : + if len(k) > cnt: raise ValueError("Too many integration constants") if iaxis != axis: raise ValueError("The axis must be integer") @@ -831,7 +832,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = np.rollaxis(c, iaxis) k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : + for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): @@ -924,22 +925,21 @@ def lagval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1 : + if len(c) == 1: c0 = c[0] c1 = 0 - elif len(c) == 2 : + elif len(c) == 2: c0 = c[0] c1 = c[1] - else : + else: nd = len(c) c0 = c[-2] c1 = c[-1] - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = c[-i] - (c1*(nd - 1))/nd c1 = tmp + (c1*((2*nd - 1) - x))/nd return c0 + c1*(1 - x) @@ -1174,7 +1174,7 @@ def laggrid3d(x, y, z, c): return c -def lagvander(x, deg) : +def lagvander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points @@ -1229,14 +1229,14 @@ def lagvander(x, deg) : dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 - if ideg > 0 : + if ideg > 0: v[1] = 1 - x - for i in range(2, ideg + 1) : + for i in range(2, ideg + 1): v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i return np.rollaxis(v, 0, v.ndim) -def lagvander2d(x, y, deg) : +def lagvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1299,7 +1299,7 @@ def lagvander2d(x, y, deg) : return v.reshape(v.shape[:-2] + (-1,)) -def lagvander3d(x, y, z, deg) : +def lagvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1490,13 +1490,13 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): y = np.asarray(y) + 0.0 # check arguments. 
- if deg < 0 : + if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2 : + if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") @@ -1516,7 +1516,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): rhs = rhs * w # set rcond - if rcond is None : + if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. @@ -1535,9 +1535,9 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) - if full : + if full: return c, [resids, rank, s, rcond] - else : + else: return c @@ -1566,7 +1566,6 @@ def lagcompanion(c): .. versionadded::1.7.0 """ - accprod = np.multiply.accumulate # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: @@ -1634,9 +1633,9 @@ def lagroots(c): """ # c is a trimmed copy [c] = pu.as_series([c]) - if len(c) <= 1 : + if len(c) <= 1: return np.array([], dtype=c.dtype) - if len(c) == 2 : + if len(c) == 2: return np.array([1 + c[0]/c[1]]) m = lagcompanion(c) @@ -1651,8 +1650,8 @@ def laggauss(deg): Computes the sample points and weights for Gauss-Laguerre quadrature. These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[0, \inf]` with the - weight function :math:`f(x) = \exp(-x)`. + degree :math:`2*deg - 1` or less over the interval :math:`[0, \inf]` + with the weight function :math:`f(x) = \exp(-x)`. Parameters ---------- diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 58c130b7e..d2de28269 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -90,17 +90,18 @@ import numpy.linalg as la from . import polyutils as pu from ._polybase import ABCPolyBase -__all__ = ['legzero', 'legone', 'legx', 'legdomain', 'legline', - 'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', - 'legder', 'legint', 'leg2poly', 'poly2leg', 'legfromroots', - 'legvander', 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', - 'legval3d', 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', - 'legcompanion', 'leggauss', 'legweight'] +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] legtrim = pu.trimcoef -def poly2leg(pol) : +def poly2leg(pol): """ Convert a polynomial to a Legendre series. @@ -143,12 +144,12 @@ def poly2leg(pol) : [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 - for i in range(deg, -1, -1) : + for i in range(deg, -1, -1): res = legadd(legmulx(res), pol[i]) return res -def leg2poly(c) : +def leg2poly(c): """ Convert a Legendre series to a polynomial. 
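[Editor's note] The laggauss docstring rewrapped above promises exactness for polynomials of degree 2*deg - 1 or less against the weight exp(-x) on [0, inf). A two-point rule therefore integrates f(x) = x exactly, and the true value of the integral of x*exp(-x) over [0, inf) is 1; a minimal sketch:

    import numpy as np
    from numpy.polynomial import laguerre as L

    x, w = L.laggauss(2)                    # exact for degree <= 3
    assert np.allclose(np.sum(w * x), 1.0)  # integral of x * exp(-x)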
@@ -202,7 +203,7 @@ def leg2poly(c) : c0 = c[-2] c1 = c[-1] # i is the current degree of c1 - for i in range(n - 1, 1, -1) : + for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], (c1*(i - 1))/i) c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) @@ -226,7 +227,7 @@ legone = np.array([1]) legx = np.array([0, 1]) -def legline(off, scl) : +def legline(off, scl): """ Legendre series whose graph is a straight line. @@ -256,13 +257,13 @@ def legline(off, scl) : -3.0 """ - if scl != 0 : + if scl != 0: return np.array([off, scl]) - else : + else: return np.array([off]) -def legfromroots(roots) : +def legfromroots(roots): """ Generate a Legendre series with given roots. @@ -311,9 +312,9 @@ def legfromroots(roots) : array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) """ - if len(roots) == 0 : + if len(roots) == 0: return np.ones(1) - else : + else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [legline(-r, 1) for r in roots] @@ -369,10 +370,10 @@ def legadd(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 - else : + else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) @@ -421,10 +422,10 @@ def legsub(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 - else : + else: c2 = -c2 c2[:c1.size] += c1 ret = c2 @@ -533,13 +534,13 @@ def legmul(c1, c2): elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs - else : + else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) return legadd(c0, legmulx(c1)) @@ -593,16 +594,16 @@ def legdiv(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : + if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) - if lc1 < lc2 : + if lc1 < lc2: return c1[:1]*0, c1 - elif lc2 == 1 : + elif lc2 == 1: return c1/c2[-1], c1[:1]*0 - else : + else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): @@ -613,7 +614,7 @@ def legdiv(c1, c2): return quo, pu.trimseq(rem) -def legpow(c, pow, maxpower=16) : +def legpow(c, pow, maxpower=16): """Raise a Legendre series to a power. Returns the Legendre series `c` raised to the power `pow`. The @@ -647,24 +648,24 @@ def legpow(c, pow, maxpower=16) : # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) - if power != pow or power < 0 : + if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : + elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") - elif power == 0 : + elif power == 0: return np.array([1], dtype=c.dtype) - elif power == 1 : + elif power == 1: return c - else : + else: # This can be made more efficient by using powers of two # in the usual way. prd = c - for i in range(2, power + 1) : + for i in range(2, power + 1): prd = legmul(prd, c) return prd -def legder(c, m=1, scl=1, axis=0) : +def legder(c, m=1, scl=1, axis=0): """ Differentiate a Legendre series. 
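[Editor's note] legmul, touched above, reprojects the product onto the Legendre basis, so evaluating the product coefficients must agree with the pointwise product of the factors; a short consistency sketch:

    import numpy as np
    from numpy.polynomial import legendre as L

    c1, c2 = [1.0, 2.0, 3.0], [3.0, 2.0]
    x = np.linspace(-1, 1, 7)
    lhs = L.legval(x, L.legmul(c1, c2))
    rhs = L.legval(x, c1) * L.legval(x, c2)
    assert np.allclose(lhs, rhs)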
@@ -747,7 +748,7 @@ def legder(c, m=1, scl=1, axis=0) : n = len(c) if cnt >= n: c = c[:1]*0 - else : + else: for i in range(cnt): n = n - 1 c *= scl @@ -857,9 +858,9 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if cnt != m: raise ValueError("The order of integration must be integer") - if cnt < 0 : + if cnt < 0: raise ValueError("The order of integration must be non-negative") - if len(k) > cnt : + if len(k) > cnt: raise ValueError("Too many integration constants") if iaxis != axis: raise ValueError("The axis must be integer") @@ -873,7 +874,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = np.rollaxis(c, iaxis) k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : + for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): @@ -964,19 +965,19 @@ def legval(x, c, tensor=True): if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) - if len(c) == 1 : + if len(c) == 1: c0 = c[0] c1 = 0 - elif len(c) == 2 : + elif len(c) == 2: c0 = c[0] c1 = c[1] - else : + else: nd = len(c) c0 = c[-2] c1 = c[-1] - for i in range(3, len(c) + 1) : + for i in range(3, len(c) + 1): tmp = c0 - nd = nd - 1 + nd = nd - 1 c0 = c[-i] - (c1*(nd - 1))/nd c1 = tmp + (c1*x*(2*nd - 1))/nd return c0 + c1*x @@ -1211,7 +1212,7 @@ def leggrid3d(x, y, z, c): return c -def legvander(x, deg) : +def legvander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points @@ -1259,14 +1260,14 @@ def legvander(x, deg) : # Use forward recursion to generate the entries. This is not as accurate # as reverse recursion in this application but it is more efficient. v[0] = x*0 + 1 - if ideg > 0 : + if ideg > 0: v[1] = x - for i in range(2, ideg + 1) : + for i in range(2, ideg + 1): v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i return np.rollaxis(v, 0, v.ndim) -def legvander2d(x, y, deg) : +def legvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1329,7 +1330,7 @@ def legvander2d(x, y, deg) : return v.reshape(v.shape[:-2] + (-1,)) -def legvander3d(x, y, z, deg) : +def legvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1515,13 +1516,13 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): y = np.asarray(y) + 0.0 # check arguments. - if deg < 0 : + if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2 : + if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") @@ -1541,7 +1542,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): rhs = rhs * w # set rcond - if rcond is None : + if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. @@ -1560,9 +1561,9 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) - if full : + if full: return c, [resids, rank, s, rcond] - else : + else: return c @@ -1637,11 +1638,11 @@ def legroots(c): ----- The root estimates are obtained as the eigenvalues of the companion matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. 
Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. The Legendre series basis polynomials aren't powers of ``x`` so the results of this function may seem unintuitive. @@ -1649,7 +1650,7 @@ def legroots(c): Examples -------- >>> import numpy.polynomial.legendre as leg - >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0 has only real roots + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots array([-0.85099543, -0.11407192, 0.51506735]) """ diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 60aaff83f..60e339a1d 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -55,11 +55,12 @@ See Also """ from __future__ import division, absolute_import, print_function -__all__ = ['polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', - 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', - 'polyval', 'polyder', 'polyint', 'polyfromroots', 'polyvander', - 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', - 'polyval3d', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit', + 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] import warnings import numpy as np @@ -92,7 +93,7 @@ polyx = np.array([0, 1]) # -def polyline(off, scl) : +def polyline(off, scl): """ Returns an array representing a linear polynomial. @@ -113,20 +114,20 @@ def polyline(off, scl) : Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polynomial as P >>> P.polyline(1,-1) array([ 1, -1]) >>> P.polyval(1, P.polyline(1,-1)) # should be 0 0.0 """ - if scl != 0 : + if scl != 0: return np.array([off, scl]) - else : + else: return np.array([off]) -def polyfromroots(roots) : +def polyfromroots(roots): """ Generate a monic polynomial with given roots. 
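[Editor's note] A recurring doctest fix in polynomial.py, above and below, swaps `from numpy import polynomial as P` for `from numpy.polynomial import polynomial as P`. The distinction matters: the numpy.polynomial package namespace only re-exports the convenience classes (Polynomial, Chebyshev, ...), so module-level functions such as polyline are reachable only through the polynomial submodule. The corrected form:

    # The package namespace has no `polyline` attribute; the submodule does.
    from numpy.polynomial import polynomial as P

    P.polyline(1, -1)                  # array([ 1, -1]), i.e. 1 - x
    P.polyval(1, P.polyline(1, -1))    # 0.0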
@@ -176,7 +177,7 @@ def polyfromroots(roots) : Examples -------- - >>> import numpy.polynomial as P + >>> from numpy.polynomial import polynomial as P >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x array([ 0., -1., 0., 1.]) >>> j = complex(0,1) @@ -184,9 +185,9 @@ def polyfromroots(roots) : array([ 1.+0.j, 0.+0.j, 1.+0.j]) """ - if len(roots) == 0 : + if len(roots) == 0: return np.ones(1) - else : + else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [polyline(-r, 1) for r in roots] @@ -225,7 +226,7 @@ def polyadd(c1, c2): Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> sum = P.polyadd(c1,c2); sum @@ -236,10 +237,10 @@ def polyadd(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 - else : + else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) @@ -270,7 +271,7 @@ def polysub(c1, c2): Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polysub(c1,c2) @@ -281,10 +282,10 @@ def polysub(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : + if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 - else : + else: c2 = -c2 c2[:c1.size] += c1 ret = c2 @@ -352,7 +353,7 @@ def polymul(c1, c2): Examples -------- - >>> import numpy.polynomial as P + >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polymul(c1,c2) @@ -389,7 +390,7 @@ def polydiv(c1, c2): Examples -------- - >>> import numpy.polynomial as P + >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polydiv(c1,c2) @@ -400,29 +401,29 @@ def polydiv(c1, c2): """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : + if c2[-1] == 0: raise ZeroDivisionError() len1 = len(c1) len2 = len(c2) - if len2 == 1 : + if len2 == 1: return c1/c2[-1], c1[:1]*0 - elif len1 < len2 : + elif len1 < len2: return c1[:1]*0, c1 - else : + else: dlen = len1 - len2 scl = c2[-1] - c2 = c2[:-1]/scl + c2 = c2[:-1]/scl i = dlen j = len1 - 1 - while i >= 0 : + while i >= 0: c1[i:j] -= c2*c1[j] i -= 1 j -= 1 return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) -def polypow(c, pow, maxpower=None) : +def polypow(c, pow, maxpower=None): """Raise a polynomial to a power. Returns the polynomial `c` raised to the power `pow`. The argument @@ -456,19 +457,19 @@ def polypow(c, pow, maxpower=None) : # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) - if power != pow or power < 0 : + if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : + elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") - elif power == 0 : + elif power == 0: return np.array([1], dtype=c.dtype) - elif power == 1 : + elif power == 1: return c - else : + else: # This can be made more efficient by using powers of two # in the usual way. 
prd = c - for i in range(2, power + 1) : + for i in range(2, power + 1): prd = np.convolve(prd, c) return prd @@ -513,7 +514,7 @@ def polyder(c, m=1, scl=1, axis=0): Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 array([ 2., 6., 12.]) @@ -550,7 +551,7 @@ def polyder(c, m=1, scl=1, axis=0): n = len(c) if cnt >= n: c = c[:1]*0 - else : + else: for i in range(cnt): n = n - 1 c *= scl @@ -624,7 +625,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3) >>> P.polyint(c) # should return array([0, 1, 1, 1]) array([ 0., 1., 1., 1.]) @@ -650,9 +651,9 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if cnt != m: raise ValueError("The order of integration must be integer") - if cnt < 0 : + if cnt < 0: raise ValueError("The order of integration must be non-negative") - if len(k) > cnt : + if len(k) > cnt: raise ValueError("Too many integration constants") if iaxis != axis: raise ValueError("The axis must be integer") @@ -661,7 +662,6 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): if iaxis < 0: iaxis += c.ndim - if cnt == 0: return c @@ -775,7 +775,7 @@ def polyval(x, c, tensor=True): c = c.reshape(c.shape + (1,)*x.ndim) c0 = c[-1] + x*0 - for i in range(2, len(c) + 1) : + for i in range(2, len(c) + 1): c0 = c[-i] + c0*x return c0 @@ -1010,7 +1010,7 @@ def polygrid3d(x, y, z, c): return c -def polyvander(x, deg) : +def polyvander(x, deg): """Vandermonde matrix of given degree. Returns the Vandermonde matrix of degree `deg` and sample points @@ -1059,14 +1059,14 @@ def polyvander(x, deg) : dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 - if ideg > 0 : + if ideg > 0: v[1] = x - for i in range(2, ideg + 1) : + for i in range(2, ideg + 1): v[i] = v[i-1]*x return np.rollaxis(v, 0, v.ndim) -def polyvander2d(x, y, deg) : +def polyvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1126,7 +1126,7 @@ def polyvander2d(x, y, deg) : return v.reshape(v.shape[:-2] + (-1,)) -def polyvander3d(x, y, z, deg) : +def polyvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample @@ -1254,7 +1254,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): rcond -- value of `rcond`. For more details, see `linalg.lstsq`. - + Raises ------ RankWarning @@ -1310,7 +1310,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" >>> c, stats = P.polyfit(x,y,3,full=True) @@ -1337,13 +1337,13 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): y = np.asarray(y) + 0.0 # check arguments. 
- if deg < 0 : + if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2 : + if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") @@ -1363,7 +1363,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): rhs = rhs * w # set rcond - if rcond is None : + if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. @@ -1382,9 +1382,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) - if full : + if full: return c, [resids, rank, s, rcond] - else : + else: return c @@ -1415,7 +1415,7 @@ def polycompanion(c): """ # c is a trimmed copy [c] = pu.as_series([c]) - if len(c) < 2 : + if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: return np.array([[-c[0]/c[1]]]) diff --git a/numpy/polynomial/polytemplate.py b/numpy/polynomial/polytemplate.py deleted file mode 100644 index e68dd18ef..000000000 --- a/numpy/polynomial/polytemplate.py +++ /dev/null @@ -1,927 +0,0 @@ -""" -Template for the Chebyshev and Polynomial classes. - -This module houses a Python string module Template object (see, e.g., -http://docs.python.org/library/string.html#template-strings) used by -the `polynomial` and `chebyshev` modules to implement their respective -`Polynomial` and `Chebyshev` classes. It provides a mechanism for easily -creating additional specific polynomial classes (e.g., Legendre, Jacobi, -etc.) in the future, such that all these classes will have a common API. - -""" -from __future__ import division, absolute_import, print_function - -import string -import sys -import warnings -from number import Number - -from numpy import ModuleDeprecationWarning - -warnings.warn("The polytemplate module will be removed in Numpy 1.10.0.", - ModuleDeprecationWarning) - -polytemplate = string.Template(''' -from __future__ import division, absolute_import, print_function -import numpy as np -import warnings -from . import polyutils as pu - -class $name(pu.PolyBase) : - """A $name series class. - - $name instances provide the standard Python numerical methods '+', - '-', '*', '//', '%', 'divmod', '**', and '()' as well as the listed - methods. - - Parameters - ---------- - coef : array_like - $name coefficients, in increasing order. For example, - ``(1, 2, 3)`` implies ``P_0 + 2P_1 + 3P_2`` where the - ``P_i`` are a graded polynomial basis. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to - the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is $domain. - window : (2,) array_like, optional - Window, see ``domain`` for its use. The default value is $domain. - .. versionadded:: 1.6.0 - - Attributes - ---------- - coef : (N,) ndarray - $name coefficients, from low to high. - domain : (2,) ndarray - Domain that is mapped to ``window``. - window : (2,) ndarray - Window that ``domain`` is mapped to. - - Class Attributes - ---------------- - maxpower : int - Maximum power allowed, i.e., the largest number ``n`` such that - ``p(x)**n`` is allowed. This is to limit runaway polynomial size. - domain : (2,) ndarray - Default domain of the class. - window : (2,) ndarray - Default window of the class. 
- - Notes - ----- - It is important to specify the domain in many cases, for instance in - fitting data, because many of the important properties of the - polynomial basis only hold in a specified interval and consequently - the data must be mapped into that interval in order to benefit. - - Examples - -------- - - """ - # Limit runaway size. T_n^m has degree n*2^m - maxpower = 16 - # Default domain - domain = np.array($domain) - # Default window - window = np.array($domain) - # Don't let participate in array operations. Value doesn't matter. - __array_priority__ = 1000 - # Not hashable - __hash__ = None - - def has_samecoef(self, other): - """Check if coefficients match. - - Parameters - ---------- - other : class instance - The other class must have the ``coef`` attribute. - - Returns - ------- - bool : boolean - True if the coefficients are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True - - def has_samedomain(self, other): - """Check if domains match. - - Parameters - ---------- - other : class instance - The other class must have the ``domain`` attribute. - - Returns - ------- - bool : boolean - True if the domains are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - return np.all(self.domain == other.domain) - - def has_samewindow(self, other): - """Check if windows match. - - Parameters - ---------- - other : class instance - The other class must have the ``window`` attribute. - - Returns - ------- - bool : boolean - True if the windows are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - return np.all(self.window == other.window) - - def has_sametype(self, other): - """Check if types match. - - Parameters - ---------- - other : object - Class instance. - - Returns - ------- - bool : boolean - True if other is same class as self - - Notes - ----- - .. versionadded:: 1.7.0 - - """ - return isinstance(other, self.__class__) - - def __init__(self, coef, domain=$domain, window=$domain) : - [coef, dom, win] = pu.as_series([coef, domain, window], trim=False) - if len(dom) != 2 : - raise ValueError("Domain has wrong number of elements.") - if len(win) != 2 : - raise ValueError("Window has wrong number of elements.") - self.coef = coef - self.domain = dom - self.window = win - - def __repr__(self): - format = "%s(%s, %s, %s)" - coef = repr(self.coef)[6:-1] - domain = repr(self.domain)[6:-1] - window = repr(self.window)[6:-1] - return format % ('$name', coef, domain, window) - - def __str__(self) : - format = "%s(%s)" - coef = str(self.coef) - return format % ('$nick', coef) - - # Pickle and copy - - def __getstate__(self) : - ret = self.__dict__.copy() - ret['coef'] = self.coef.copy() - ret['domain'] = self.domain.copy() - ret['window'] = self.window.copy() - return ret - - def __setstate__(self, dict) : - self.__dict__ = dict - - # Call - - def __call__(self, arg) : - off, scl = pu.mapparms(self.domain, self.window) - arg = off + scl*arg - return ${nick}val(arg, self.coef) - - def __iter__(self) : - return iter(self.coef) - - def __len__(self) : - return len(self.coef) - - # Numeric properties. 
- - def __neg__(self) : - return self.__class__(-self.coef, self.domain, self.window) - - def __pos__(self) : - return self - - def __add__(self, other) : - """Returns sum""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}add(self.coef, other.coef) - else : - try : - coef = ${nick}add(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __sub__(self, other) : - """Returns difference""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}sub(self.coef, other.coef) - else : - try : - coef = ${nick}sub(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __mul__(self, other) : - """Returns product""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}mul(self.coef, other.coef) - else : - try : - coef = ${nick}mul(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __div__(self, other): - # set to __floordiv__, /, for now. - return self.__floordiv__(other) - - def __truediv__(self, other) : - # there is no true divide if the rhs is not a Number, although it - # could return the first n elements of an infinite series. - # It is hard to see where n would come from, though. 
- if not isinstance(other, Number) or isinstance(other, bool): - form = "unsupported types for true division: '%s', '%s'" - raise TypeError(form % (type(self), type(other))) - return self.__floordiv__(other) - - def __floordiv__(self, other) : - """Returns the quotient.""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __mod__(self, other) : - """Returns the remainder.""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - return self.__class__(rem, self.domain, self.window) - - def __divmod__(self, other) : - """Returns quo, remainder""" - if isinstance(other, self.__class__) : - if not self.has_samedomain(other): - raise TypeError("Domains are not equal") - elif not self.has_samewindow(other): - raise TypeError("Windows are not equal") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - def __pow__(self, other) : - try : - coef = ${nick}pow(self.coef, other, maxpower = self.maxpower) - except : - raise - return self.__class__(coef, self.domain, self.window) - - def __radd__(self, other) : - try : - coef = ${nick}add(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rsub__(self, other): - try : - coef = ${nick}sub(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rmul__(self, other) : - try : - coef = ${nick}mul(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - - def __rtruediv__(self, other) : - # An instance of PolyBase is not considered a - # Number. 
- return NotImplemented - - def __rfloordiv__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except: - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __rmod__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - return self.__class__(rem, self.domain, self.window) - - def __rdivmod__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - # Enhance me - # some augmented arithmetic operations could be added here - - def __eq__(self, other) : - res = isinstance(other, self.__class__) \ - and self.has_samecoef(other) \ - and self.has_samedomain(other) \ - and self.has_samewindow(other) - return res - - def __ne__(self, other) : - return not self.__eq__(other) - - # - # Extra methods. - # - - def copy(self) : - """Return a copy. - - Return a copy of the current $name instance. - - Returns - ------- - new_instance : $name - Copy of current instance. - - """ - return self.__class__(self.coef, self.domain, self.window) - - def degree(self) : - """The degree of the series. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - return len(self) - 1 - - def cutdeg(self, deg) : - """Truncate series to the given degree. - - Reduce the degree of the $name series to `deg` by discarding the - high order terms. If `deg` is greater than the current degree a - copy of the current series is returned. This can be useful in least - squares where the coefficients of the high degree terms may be very - small. - - Parameters - ---------- - deg : non-negative int - The series is reduced to degree `deg` by discarding the high - order terms. The value of `deg` must be a non-negative integer. - - Returns - ------- - new_instance : $name - New instance of $name with reduced degree. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - return self.truncate(deg + 1) - - def trim(self, tol=0) : - """Remove small leading coefficients - - Remove leading coefficients until a coefficient is reached whose - absolute value greater than `tol` or the beginning of the series is - reached. If all the coefficients would be removed the series is set to - ``[0]``. A new $name instance is returned with the new coefficients. - The current instance remains unchanged. - - Parameters - ---------- - tol : non-negative number. - All trailing coefficients less than `tol` will be removed. - - Returns - ------- - new_instance : $name - Contains the new set of coefficients. - - """ - coef = pu.trimcoef(self.coef, tol) - return self.__class__(coef, self.domain, self.window) - - def truncate(self, size) : - """Truncate series to length `size`. - - Reduce the $name series to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. This - can be useful in least squares where the coefficients of the - high degree terms may be very small. - - Parameters - ---------- - size : positive int - The series is reduced to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. - - Returns - ------- - new_instance : $name - New instance of $name with truncated coefficients. 
- - """ - isize = int(size) - if isize != size or isize < 1 : - raise ValueError("size must be a positive integer") - if isize >= len(self.coef) : - coef = self.coef - else : - coef = self.coef[:isize] - return self.__class__(coef, self.domain, self.window) - - def convert(self, domain=None, kind=None, window=None) : - """Convert to different class and/or domain. - - Parameters - ---------- - domain : array_like, optional - The domain of the converted series. If the value is None, - the default domain of `kind` is used. - kind : class, optional - The polynomial series type class to which the current instance - should be converted. If kind is None, then the class of the - current instance is used. - window : array_like, optional - The window of the converted series. If the value is None, - the default window of `kind` is used. - - Returns - ------- - new_series_instance : `kind` - The returned class can be of different type than the current - instance and/or have a different domain. - - Notes - ----- - Conversion between domains and class types can result in - numerically ill defined series. - - Examples - -------- - - """ - if kind is None: - kind = $name - if domain is None: - domain = kind.domain - if window is None: - window = kind.window - return self(kind.identity(domain, window=window)) - - def mapparms(self) : - """Return the mapping parameters. - - The returned values define a linear map ``off + scl*x`` that is - applied to the input arguments before the series is evaluated. The - map depends on the ``domain`` and ``window``; if the current - ``domain`` is equal to the ``window`` the resulting map is the - identity. If the coefficients of the ``$name`` instance are to be - used by themselves outside this class, then the linear function - must be substituted for the ``x`` in the standard representation of - the base polynomials. - - Returns - ------- - off, scl : floats or complex - The mapping function is defined by ``off + scl*x``. - - Notes - ----- - If the current domain is the interval ``[l_1, r_1]`` and the window - is ``[l_2, r_2]``, then the linear mapping function ``L`` is - defined by the equations:: - - L(l_1) = l_2 - L(r_1) = r_2 - - """ - return pu.mapparms(self.domain, self.window) - - def integ(self, m=1, k=[], lbnd=None) : - """Integrate. - - Return an instance of $name that is the definite integral of the - current series. Refer to `${nick}int` for full documentation. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - k : array_like - Integration constants. The first constant is applied to the - first integration, the second to the second, and so on. The - list of values must less than or equal to `m` in length and any - missing values are set to zero. - lbnd : Scalar - The lower bound of the definite integral. - - Returns - ------- - integral : $name - The integral of the series using the same domain. - - See Also - -------- - ${nick}int : similar function. - ${nick}der : similar function for derivative. - - """ - off, scl = self.mapparms() - if lbnd is None : - lbnd = 0 - else : - lbnd = off + scl*lbnd - coef = ${nick}int(self.coef, m, k, lbnd, 1./scl) - return self.__class__(coef, self.domain, self.window) - - def deriv(self, m=1): - """Differentiate. - - Return an instance of $name that is the derivative of the current - series. Refer to `${nick}der` for full documentation. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. 
- - Returns - ------- - derivative : $name - The derivative of the series using the same domain. - - See Also - -------- - ${nick}der : similar function. - ${nick}int : similar function for integration. - - """ - off, scl = self.mapparms() - coef = ${nick}der(self.coef, m, scl) - return self.__class__(coef, self.domain, self.window) - - def roots(self) : - """Return list of roots. - - Return ndarray of roots for this series. See `${nick}roots` for - full documentation. Note that the accuracy of the roots is likely to - decrease the further outside the domain they lie. - - See Also - -------- - ${nick}roots : similar function - ${nick}fromroots : function to go generate series from roots. - - """ - roots = ${nick}roots(self.coef) - return pu.mapdomain(roots, self.window, self.domain) - - def linspace(self, n=100, domain=None): - """Return x,y values at equally spaced points in domain. - - Returns x, y values at `n` linearly spaced points across domain. - Here y is the value of the polynomial at the points x. By default - the domain is the same as that of the $name instance. This method - is intended mostly as a plotting aid. - - Parameters - ---------- - n : int, optional - Number of point pairs to return. The default value is 100. - domain : {None, array_like} - If not None, the specified domain is used instead of that of - the calling instance. It should be of the form ``[beg,end]``. - The default is None. - - Returns - ------- - x, y : ndarrays - ``x`` is equal to linspace(self.domain[0], self.domain[1], n) - ``y`` is the polynomial evaluated at ``x``. - - .. versionadded:: 1.5.0 - - """ - if domain is None: - domain = self.domain - x = np.linspace(domain[0], domain[1], n) - y = self(x) - return x, y - - - - @staticmethod - def fit(x, y, deg, domain=None, rcond=None, full=False, w=None, - window=$domain): - """Least squares fit to data. - - Return a `$name` instance that is the least squares fit to the data - `y` sampled at `x`. Unlike `${nick}fit`, the domain of the returned - instance can be specified and this will often result in a superior - fit with less chance of ill conditioning. Support for NA was added - in version 1.7.0. See `${nick}fit` for full documentation of the - implementation. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial. - domain : {None, [beg, end], []}, optional - Domain to use for the returned $name instance. If ``None``, - then a minimal domain that covers the points `x` is chosen. If - ``[]`` the default domain ``$domain`` is used. The default - value is $domain in numpy 1.4.x and ``None`` in later versions. - The ``[]`` value was added in numpy 1.5.0. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than this relative to the largest singular value will be - ignored. The default value is len(x)*eps, where eps is the - relative precision of the float type, about 2e-16 in most - cases. - full : bool, optional - Switch determining nature of return value. When it is False - (the default) just the coefficients are returned, when True - diagnostic information from the singular value decomposition is - also returned. - w : array_like, shape (M,), optional - Weights. 
If not None the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products - ``w[i]*y[i]`` all have the same variance. The default value is - None. - .. versionadded:: 1.5.0 - window : {[beg, end]}, optional - Window to use for the returned $name instance. The default - value is ``$domain`` - .. versionadded:: 1.6.0 - - Returns - ------- - least_squares_fit : instance of $name - The $name instance is the least squares fit to the data and - has the domain specified in the call. - - [residuals, rank, singular_values, rcond] : only if `full` = True - Residuals of the least squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see - `linalg.lstsq`. - - See Also - -------- - ${nick}fit : similar function - - """ - if domain is None: - domain = pu.getdomain(x) - elif type(domain) is list and len(domain) == 0: - domain = $domain - - if type(window) is list and len(window) == 0: - window = $domain - - xnew = pu.mapdomain(x, domain, window) - res = ${nick}fit(xnew, y, deg, w=w, rcond=rcond, full=full) - if full : - [coef, status] = res - return $name(coef, domain=domain, window=window), status - else : - coef = res - return $name(coef, domain=domain, window=window) - - @staticmethod - def fromroots(roots, domain=$domain, window=$domain) : - """Return $name instance with specified roots. - - Returns an instance of $name representing the product - ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is the - list of roots. - - Parameters - ---------- - roots : array_like - List of roots. - domain : {array_like, None}, optional - Domain for the resulting instance of $name. If none the domain - is the interval from the smallest root to the largest. The - default is $domain. - window : array_like, optional - Window for the resulting instance of $name. The default value - is $domain. - - Returns - ------- - object : $name instance - Series with the specified roots. - - See Also - -------- - ${nick}fromroots : equivalent function - - """ - [roots] = pu.as_series([roots], trim=False) - if domain is None : - domain = pu.getdomain(roots) - deg = len(roots) - off, scl = pu.mapparms(domain, window) - rnew = off + scl*roots - coef = ${nick}fromroots(rnew) / scl**deg - return $name(coef, domain=domain, window=window) - - @staticmethod - def identity(domain=$domain, window=$domain) : - """Identity function. - - If ``p`` is the returned $name object, then ``p(x) == x`` for all - values of x. - - Parameters - ---------- - domain : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. - window : array_like - The resulting array must be if the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. - - Returns - ------- - identity : $name instance - - """ - off, scl = pu.mapparms(window, domain) - coef = ${nick}line(off, scl) - return $name(coef, domain, window) - - @staticmethod - def basis(deg, domain=$domain, window=$domain): - """$name polynomial of degree `deg`. - - Returns an instance of the $name polynomial of degree `d`. - - Parameters - ---------- - deg : int - Degree of the $name polynomial. Must be >= 0. - domain : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. 
- window : array_like - The resulting array must be if the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. - - Returns - p : $name instance - - Notes - ----- - .. versionadded:: 1.7.0 - - """ - ideg = int(deg) - if ideg != deg or ideg < 0: - raise ValueError("deg must be non-negative integer") - return $name([0]*ideg + [1], domain, window) - - @staticmethod - def cast(series, domain=$domain, window=$domain): - """Convert instance to equivalent $name series. - - The `series` is expected to be an instance of some polynomial - series of one of the types supported by by the numpy.polynomial - module, but could be some other class that supports the convert - method. - - Parameters - ---------- - series : series - The instance series to be converted. - domain : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. - window : array_like - The resulting array must be if the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. - - Returns - p : $name instance - A $name series equal to the `poly` series. - - See Also - -------- - convert -- similar instance method - - Notes - ----- - .. versionadded:: 1.7.0 - - """ - return series.convert(domain, $name, window) - -''') diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 99f508521..9348559ed 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -45,27 +45,25 @@ Functions """ from __future__ import division, absolute_import, print_function -__all__ = ['RankWarning', 'PolyError', 'PolyDomainError', 'as_series', - 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', - 'PolyBase'] - -import warnings import numpy as np -import sys + +__all__ = [ + 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', + 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase'] # # Warnings and Exceptions # -class RankWarning(UserWarning) : +class RankWarning(UserWarning): """Issued by chebfit when the design matrix is rank deficient.""" pass -class PolyError(Exception) : +class PolyError(Exception): """Base class for errors in this module.""" pass -class PolyDomainError(PolyError) : +class PolyDomainError(PolyError): """Issued by the generic Poly class when two domains don't match. This is raised when an binary operation is passed Poly objects with @@ -78,7 +76,7 @@ class PolyDomainError(PolyError) : # Base class for all polynomial types # -class PolyBase(object) : +class PolyBase(object): """ Base class for all polynomial types. @@ -93,7 +91,7 @@ class PolyBase(object) : # # Helper functions to convert inputs to 1-D arrays # -def trimseq(seq) : +def trimseq(seq): """Remove small Poly series coefficients. Parameters @@ -114,16 +112,16 @@ def trimseq(seq) : Do not lose the type info if the sequence contains unknown objects. """ - if len(seq) == 0 : + if len(seq) == 0: return seq - else : - for i in range(len(seq) - 1, -1, -1) : - if seq[i] != 0 : + else: + for i in range(len(seq) - 1, -1, -1): + if seq[i] != 0: break return seq[:i+1] -def as_series(alist, trim=True) : +def as_series(alist, trim=True): """ Return argument as a list of 1-d arrays. 
@@ -165,32 +163,32 @@ def as_series(alist, trim=True) : """ arrays = [np.array(a, ndmin=1, copy=0) for a in alist] - if min([a.size for a in arrays]) == 0 : + if min([a.size for a in arrays]) == 0: raise ValueError("Coefficient array is empty") - if any([a.ndim != 1 for a in arrays]) : + if any([a.ndim != 1 for a in arrays]): raise ValueError("Coefficient array is not 1-d") - if trim : + if trim: arrays = [trimseq(a) for a in arrays] - if any([a.dtype == np.dtype(object) for a in arrays]) : + if any([a.dtype == np.dtype(object) for a in arrays]): ret = [] - for a in arrays : - if a.dtype != np.dtype(object) : + for a in arrays: + if a.dtype != np.dtype(object): tmp = np.empty(len(a), dtype=np.dtype(object)) tmp[:] = a[:] ret.append(tmp) - else : + else: ret.append(a.copy()) - else : - try : + else: + try: dtype = np.common_type(*arrays) - except : + except: raise ValueError("Coefficient arrays have no common type") ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] return ret -def trimcoef(c, tol=0) : +def trimcoef(c, tol=0): """ Remove "small" "trailing" coefficients from a polynomial. @@ -234,17 +232,17 @@ def trimcoef(c, tol=0) : array([ 0.0003+0.j , 0.0010-0.001j]) """ - if tol < 0 : + if tol < 0: raise ValueError("tol must be non-negative") [c] = as_series([c]) [ind] = np.where(np.abs(c) > tol) - if len(ind) == 0 : + if len(ind) == 0: return c[:1]*0 - else : + else: return c[:ind[-1] + 1].copy() -def getdomain(x) : +def getdomain(x): """ Return a domain suitable for given abscissae. @@ -283,14 +281,14 @@ def getdomain(x) : """ [x] = as_series([x], trim=False) - if x.dtype.char in np.typecodes['Complex'] : + if x.dtype.char in np.typecodes['Complex']: rmin, rmax = x.real.min(), x.real.max() imin, imax = x.imag.min(), x.imag.max() return np.array((complex(rmin, imin), complex(rmax, imax))) - else : + else: return np.array((x.min(), x.max())) -def mapparms(old, new) : +def mapparms(old, new): """ Linear map parameters between domains. @@ -337,7 +335,7 @@ def mapparms(old, new) : scl = newlen/oldlen return off, scl -def mapdomain(x, old, new) : +def mapdomain(x, old, new): """ Apply linear map to input points. 
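The polyutils hunks above are whitespace and style cleanups, but since ``mapparms`` and ``mapdomain`` underpin the domain/window machinery used by all the series classes, a short usage sketch may help. This only exercises the documented linear-map behaviour (``new = off + scl*old``) and assumes no API beyond what the hunks show:

    # Sketch: map the default domain [-1, 1] onto [0, 4].
    import numpy as np
    from numpy.polynomial import polyutils as pu

    off, scl = pu.mapparms([-1, 1], [0, 4])   # (2.0, 2.0), i.e. x -> 2 + 2*x
    x = np.array([-1.0, 0.0, 1.0])
    print(pu.mapdomain(x, [-1, 1], [0, 4]))   # [ 0.  2.  4.]

Here ``mapparms`` solves the two endpoint conditions L(-1) = 0 and L(1) = 4, and ``mapdomain`` simply evaluates that affine map at the given points.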
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 82c3ba9ea..a596905f6 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -400,14 +400,14 @@ class TestFitting(TestCase): return x*(x - 1)*(x - 2) # Test exceptions - assert_raises(ValueError, cheb.chebfit, [1], [1], -1) - assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) - assert_raises(TypeError, cheb.chebfit, [], [1], 0) - assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) - assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) - assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) # Test fit x = np.linspace(0, 2) @@ -532,7 +532,7 @@ class TestMisc(TestCase): assert_almost_equal(cheb.chebpts1(2), tgt) tgt = [-0.86602540378443871, 0, 0.86602540378443871] assert_almost_equal(cheb.chebpts1(3), tgt) - tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] assert_almost_equal(cheb.chebpts1(4), tgt) def test_chebpts2(self): diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index f9134b8c1..cd5a54687 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -10,12 +10,10 @@ from numbers import Number import numpy as np from numpy.polynomial import ( - Polynomial, Legendre, Chebyshev, Laguerre, - Hermite, HermiteE) + Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite, dec) -from numpy.testing.noseclasses import KnownFailure + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite) from numpy.compat import long @@ -410,6 +408,9 @@ def check_roots(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 tgt = np.sort(random((5,))) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window res = np.sort(Poly.fromroots(tgt).roots()) assert_almost_equal(res, tgt) @@ -468,6 +469,12 @@ def check_deriv(Poly): p3 = p1.integ(1, k=[1]) assert_almost_equal(p2.deriv(1).coef, p3.coef) assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) def check_linspace(Poly): @@ -491,11 +498,18 @@ def check_linspace(Poly): def check_pow(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 - tgt = Poly([1], domain=d, window=d) - tst = Poly([1, 2, 3], domain=d, window=d) + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = 
Poly([1]) + tst = Poly([1, 2, 3]) for i in range(5): assert_poly_almost_equal(tst**i, tgt) tgt = tgt * tst + # check error for invalid powers assert_raises(ValueError, op.pow, tgt, 1.5) assert_raises(ValueError, op.pow, tgt, -1) diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index ac60007d1..e67625a88 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -119,7 +119,6 @@ class TestEvaluation(TestCase): y = [polyval(x, c) for c in Hlist] for i in range(10): msg = "At i=%d" % i - ser = np.zeros tgt = y[i] res = herm.hermval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) @@ -389,14 +388,14 @@ class TestFitting(TestCase): return x*(x - 1)*(x - 2) # Test exceptions - assert_raises(ValueError, herm.hermfit, [1], [1], -1) - assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) - assert_raises(TypeError, herm.hermfit, [], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) - assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) # Test fit x = np.linspace(0, 2) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 5341dc7ff..f8601a828 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -6,7 +6,9 @@ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from numpy.testing import * +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) He0 = np.array([1]) He1 = np.array([0, 1]) @@ -117,7 +119,6 @@ class TestEvaluation(TestCase): y = [polyval(x, c) for c in Helist] for i in range(10): msg = "At i=%d" % i - ser = np.zeros tgt = y[i] res = herme.hermeval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) @@ -388,14 +389,14 @@ class TestFitting(TestCase): return x*(x - 1)*(x - 2) # Test exceptions - assert_raises(ValueError, herme.hermefit, [1], [1], -1) - assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) - assert_raises(TypeError, herme.hermefit, [], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) - assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 
0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) # Test fit x = np.linspace(0, 2) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index b3d8fe5ee..1dc57a960 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -116,7 +116,6 @@ class TestEvaluation(TestCase): y = [polyval(x, c) for c in Llist] for i in range(7): msg = "At i=%d" % i - ser = np.zeros tgt = y[i] res = lag.lagval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) @@ -386,14 +385,14 @@ class TestFitting(TestCase): return x*(x - 1)*(x - 2) # Test exceptions - assert_raises(ValueError, lag.lagfit, [1], [1], -1) - assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) - assert_raises(TypeError, lag.lagfit, [], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) - assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) # Test fit x = np.linspace(0, 2) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index e248f005d..8ac1feb58 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -120,7 +120,6 @@ class TestEvaluation(TestCase): y = [polyval(x, c) for c in Llist] for i in range(10): msg = "At i=%d" % i - ser = np.zeros tgt = y[i] res = leg.legval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) @@ -390,14 +389,14 @@ class TestFitting(TestCase): return x*(x - 1)*(x - 2) # Test exceptions - assert_raises(ValueError, leg.legfit, [1], [1], -1) - assert_raises(TypeError, leg.legfit, [[1]], [1], 0) - assert_raises(TypeError, leg.legfit, [], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) - assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) # Test fit x = np.linspace(0, 2) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 77092cd2f..c806a8497 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -420,14 +420,14 @@ class TestMisc(TestCase): return x*(x - 1)*(x - 2) # Test exceptions - assert_raises(ValueError, poly.polyfit, [1], [1], -1) - assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) - assert_raises(TypeError, poly.polyfit, [], [1], 0) - 
assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) - assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) # Test fit x = np.linspace(0, 2) diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index c77ee2435..974e2e09a 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -5,7 +5,9 @@ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.polyutils as pu -from numpy.testing import * +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) class TestMisc(TestCase): @@ -101,3 +103,7 @@ class TestDomain(TestCase): tgt = [-1 + 1j, 1 - 1j] res = pu.mapparms(dom1, dom2) assert_almost_equal(res, tgt) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index c2603543d..b089d7742 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -31,6 +31,9 @@ cdef extern from "math.h": double sin(double x) double cos(double x) +cdef extern from "numpy/npy_math.h": + int npy_isfinite(double x) + cdef extern from "mtrand_py_helper.h": object empty_py_bytes(npy_intp length, void **bytes) @@ -127,7 +130,10 @@ import_array() import numpy as np import operator import warnings -from threading import Lock +try: + from threading import Lock +except ImportError: + from dummy_threading import Lock cdef object cont0_array(rk_state *state, rk_cont0 func, object size, object lock): @@ -1068,7 +1074,7 @@ cdef class RandomState: if pop_size is 0: raise ValueError("a must be non-empty") - if None != p: + if p is not None: d = len(p) p = <ndarray>PyArray_ContiguousFromObject(p, NPY_DOUBLE, 1, 1) pix = <double*>PyArray_DATA(p) @@ -1090,7 +1096,7 @@ cdef class RandomState: # Actual sampling if replace: - if None != p: + if p is not None: cdf = p.cumsum() cdf /= cdf[-1] uniform_samples = self.random_sample(shape) @@ -1103,7 +1109,7 @@ cdef class RandomState: raise ValueError("Cannot take a larger sample than " "population when 'replace=False'") - if None != p: + if p is not None: if np.count_nonzero(p > 0) < size: raise ValueError("Fewer non-zero entries in p than size") n_uniq = 0 @@ -1220,14 +1226,19 @@ cdef class RandomState: """ cdef ndarray olow, ohigh, odiff - cdef double flow, fhigh + cdef double flow, fhigh, fscale cdef object temp flow = PyFloat_AsDouble(low) fhigh = PyFloat_AsDouble(high) + + fscale = fhigh - flow + if not npy_isfinite(fscale): + raise OverflowError('Range exceeds valid bounds') + if not PyErr_Occurred(): return cont2_array_sc(self.internal_state, rk_uniform, size, flow, - fhigh-flow, self.lock) + fscale, self.lock) PyErr_Clear() olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -3752,8 +3763,9 @@ cdef class RandomState: 
Parameters ---------- - lam : float - Expectation of interval, should be >= 0. + lam : float or sequence of float + Expectation of interval, should be >= 0. A sequence of expectation + intervals must be broadcastable over the requested size. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. Default is None, in which case a @@ -3793,6 +3805,10 @@ cdef class RandomState: >>> count, bins, ignored = plt.hist(s, 14, normed=True) >>> plt.show() + Draw 100 values for each of lambda 100 and 500: + + >>> s = np.random.poisson(lam=(100., 500.), size=(100, 2)) + """ cdef ndarray olam cdef double flam diff --git a/numpy/random/setup.py b/numpy/random/setup.py index 55cca69da..33c12975b 100644 --- a/numpy/random/setup.py +++ b/numpy/random/setup.py @@ -45,12 +45,11 @@ def configuration(parent_package='',top_path=None): ['mtrand.c', 'randomkit.c', 'initarray.c', 'distributions.c']]+[generate_libraries], libraries=libs, - depends = [join('mtrand', '*.h'), - join('mtrand', '*.pyx'), - join('mtrand', '*.pxi'), - ], - define_macros = defs, - ) + depends=[join('mtrand', '*.h'), + join('mtrand', '*.pyx'), + join('mtrand', '*.pxi'),], + define_macros=defs, + ) config.add_data_files(('.', join('mtrand', 'randomkit.h'))) config.add_data_dir('tests') diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index b64c9d6cd..897a8e6f0 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -6,6 +6,7 @@ from numpy.testing import ( assert_warns) from numpy import random from numpy.compat import asbytes +import sys class TestSeed(TestCase): def test_scalar(self): @@ -60,7 +61,7 @@ class TestMultinomial(TestCase): random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) def test_int_negative_interval(self): - assert_( -5 <= random.randint(-5, -1) < -1) + assert_(-5 <= random.randint(-5, -1) < -1) x = random.randint(-5, -1, 5) assert_(np.all(-5 <= x)) assert_(np.all(x < -1)) @@ -68,15 +69,15 @@ def test_size(self): # gh-3173 p = [0.5, 0.5] - assert_equal(np.random.multinomial(1 ,p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1 ,p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1 ,p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1 ,p, [2, 2]).shape, (2, 2, 2)) - assert_equal(np.random.multinomial(1 ,p, (2, 2)).shape, (2, 2, 2)) - assert_equal(np.random.multinomial(1 ,p, np.array((2, 2))).shape, + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, (2, 2, 2)) - assert_raises(TypeError, np.random.multinomial, 1 , p, + assert_raises(TypeError, np.random.multinomial, 1, p, np.float(1)) @@ -93,17 +94,16 @@ class TestSetState(TestCase): assert_(np.all(old == new)) def test_gaussian_reset(self): - """ Make sure the cached every-other-Gaussian is reset. - """ + # Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3) self.prng.set_state(self.state) new = self.prng.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): - """ When the state is saved with a cached Gaussian, make sure the cached - Gaussian is restored. - """ + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + self.prng.standard_normal() state = self.prng.get_state() old = self.prng.standard_normal(size=3) @@ -112,9 +112,8 @@ assert_(np.all(old == new)) def test_backwards_compatibility(self): - """ Make sure we can accept old state tuples that do not have the cached - Gaussian value. - """ + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. old_state = self.state[:-2] x1 = self.prng.standard_normal(size=16) self.prng.set_state(old_state) @@ -125,56 +124,55 @@ assert_(np.all(x1 == x3)) def test_negative_binomial(self): - """ Ensure that the negative binomial results take floating point - arguments without truncation. - """ + # Ensure that the negative binomial results take floating point + # arguments without truncation. self.prng.negative_binomial(0.5, 0.5) class TestRandomDist(TestCase): - """ Make sure the random distrobution return the correct value for a - given seed - """ + # Make sure the random distribution returns the correct value for a + # given seed + def setUp(self): self.seed = 1234567890 def test_rand(self): np.random.seed(self.seed) actual = np.random.rand(3, 2) - desired = np.array([[ 0.61879477158567997, 0.59162362775974664], - [ 0.88868358904449662, 0.89165480011560816], - [ 0.4575674820298663, 0.7781880808593471 ]]) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): np.random.seed(self.seed) actual = np.random.randn(3, 2) - desired = np.array([[ 1.34016345771863121, 1.73759122771936081], - [ 1.498988344300628, -0.2286433324536169 ], - [ 2.031033998682787, 2.17032494605655257]]) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_randint(self): np.random.seed(self.seed) actual = np.random.randint(-99, 99, size=(3, 2)) - desired = np.array([[ 31, 3], - [-52, 41], - [-48, -66]]) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) np.testing.assert_array_equal(actual, desired) def test_random_integers(self): np.random.seed(self.seed) actual = np.random.random_integers(-99, 99, size=(3, 2)) - desired = np.array([[ 31, 3], - [-52, 41], - [-48, -66]]) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) np.testing.assert_array_equal(actual, desired) def test_random_sample(self): np.random.seed(self.seed) actual = np.random.random_sample((3, 2)) - desired = np.array([[ 0.61879477158567997, 0.59162362775974664], - [ 0.88868358904449662, 0.89165480011560816], - [ 0.4575674820298663, 0.7781880808593471 ]]) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): @@ -304,9 +302,10 @@ class TestRandomDist(TestCase): def
test_beta(self):
        np.random.seed(self.seed)
        actual = np.random.beta(.1, .9, size=(3, 2))
-        desired = np.array([[ 1.45341850513746058e-02, 5.31297615662868145e-04],
-                            [ 1.85366619058432324e-06, 4.19214516800110563e-03],
-                            [ 1.58405155108498093e-04, 1.26252891949397652e-04]])
+        desired = np.array(
+            [[1.45341850513746058e-02, 5.31297615662868145e-04],
+             [1.85366619058432324e-06, 4.19214516800110563e-03],
+             [1.58405155108498093e-04, 1.26252891949397652e-04]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_binomial(self):
@@ -320,26 +319,26 @@ class TestRandomDist(TestCase):
     def test_chisquare(self):
         np.random.seed(self.seed)
         actual = np.random.chisquare(50, size=(3, 2))
-        desired = np.array([[ 63.87858175501090585, 68.68407748911370447],
-                            [ 65.77116116901505904, 47.09686762438974483],
-                            [ 72.3828403199695174, 74.18408615260374006]])
+        desired = np.array([[63.87858175501090585, 68.68407748911370447],
+                            [65.77116116901505904, 47.09686762438974483],
+                            [72.3828403199695174, 74.18408615260374006]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=13)

     def test_dirichlet(self):
         np.random.seed(self.seed)
-        alpha = np.array([51.72840233779265162,  39.74494232180943953])
+        alpha = np.array([51.72840233779265162, 39.74494232180943953])
         actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
-        desired = np.array([[[ 0.54539444573611562, 0.45460555426388438],
-                             [ 0.62345816822039413, 0.37654183177960598]],
-                            [[ 0.55206000085785778, 0.44793999914214233],
-                             [ 0.58964023305154301, 0.41035976694845688]],
-                            [[ 0.59266909280647828, 0.40733090719352177],
-                             [ 0.56974431743975207, 0.43025568256024799]]])
+        desired = np.array([[[0.54539444573611562, 0.45460555426388438],
+                             [0.62345816822039413, 0.37654183177960598]],
+                            [[0.55206000085785778, 0.44793999914214233],
+                             [0.58964023305154301, 0.41035976694845688]],
+                            [[0.59266909280647828, 0.40733090719352177],
+                             [0.56974431743975207, 0.43025568256024799]]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_dirichlet_size(self):
         # gh-3173
-        p = np.array([51.72840233779265162,  39.74494232180943953])
+        p = np.array([51.72840233779265162, 39.74494232180943953])
         assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
         assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
         assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
@@ -352,49 +351,49 @@ class TestRandomDist(TestCase):
     def test_exponential(self):
         np.random.seed(self.seed)
         actual = np.random.exponential(1.1234, size=(3, 2))
-        desired = np.array([[ 1.08342649775011624, 1.00607889924557314],
-                            [ 2.46628830085216721, 2.49668106809923884],
-                            [ 0.68717433461363442, 1.69175666993575979]])
+        desired = np.array([[1.08342649775011624, 1.00607889924557314],
+                            [2.46628830085216721, 2.49668106809923884],
+                            [0.68717433461363442, 1.69175666993575979]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_f(self):
         np.random.seed(self.seed)
         actual = np.random.f(12, 77, size=(3, 2))
-        desired = np.array([[ 1.21975394418575878, 1.75135759791559775],
-                            [ 1.44803115017146489, 1.22108959480396262],
-                            [ 1.02176975757740629, 1.34431827623300415]])
+        desired = np.array([[1.21975394418575878, 1.75135759791559775],
+                            [1.44803115017146489, 1.22108959480396262],
+                            [1.02176975757740629, 1.34431827623300415]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_gamma(self):
         np.random.seed(self.seed)
         actual = np.random.gamma(5, 3, size=(3, 2))
-        desired = np.array([[ 24.60509188649287182, 28.54993563207210627],
-                            [ 26.13476110204064184, 12.56988482927716078],
-                            [ 31.71863275789960568, 33.30143302795922011]])
+        desired = np.array([[24.60509188649287182, 28.54993563207210627],
+                            [26.13476110204064184, 12.56988482927716078],
+                            [31.71863275789960568, 33.30143302795922011]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_geometric(self):
         np.random.seed(self.seed)
         actual = np.random.geometric(.123456789, size=(3, 2))
-        desired = np.array([[ 8, 7],
-                            [17, 17],
-                            [ 5, 12]])
+        desired = np.array([[8, 7],
+                            [17, 17],
+                            [5, 12]])
         np.testing.assert_array_equal(actual, desired)

     def test_gumbel(self):
         np.random.seed(self.seed)
-        actual = np.random.gumbel(loc = .123456789, scale = 2.0, size = (3, 2))
-        desired = np.array([[ 0.19591898743416816, 0.34405539668096674],
-                            [-1.4492522252274278, -1.47374816298446865],
-                            [ 1.10651090478803416, -0.69535848626236174]])
+        actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[0.19591898743416816, 0.34405539668096674],
+                            [-1.4492522252274278, -1.47374816298446865],
+                            [1.10651090478803416, -0.69535848626236174]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_hypergeometric(self):
         np.random.seed(self.seed)
         actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
         desired = np.array([[10, 10],
-                            [10, 10],
-                            [ 9, 9]])
+                            [10, 10],
+                            [9, 9]])
         np.testing.assert_array_equal(actual, desired)

         # Test nbad = 0
@@ -418,49 +417,49 @@ class TestRandomDist(TestCase):
     def test_laplace(self):
         np.random.seed(self.seed)
         actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
-        desired = np.array([[ 0.66599721112760157, 0.52829452552221945],
-                            [ 3.12791959514407125, 3.18202813572992005],
-                            [-0.05391065675859356, 1.74901336242837324]])
+        desired = np.array([[0.66599721112760157, 0.52829452552221945],
+                            [3.12791959514407125, 3.18202813572992005],
+                            [-0.05391065675859356, 1.74901336242837324]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_logistic(self):
         np.random.seed(self.seed)
         actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
-        desired = np.array([[ 1.09232835305011444, 0.8648196662399954 ],
-                            [ 4.27818590694950185, 4.33897006346929714],
-                            [-0.21682183359214885, 2.63373365386060332]])
+        desired = np.array([[1.09232835305011444, 0.8648196662399954],
+                            [4.27818590694950185, 4.33897006346929714],
+                            [-0.21682183359214885, 2.63373365386060332]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_lognormal(self):
         np.random.seed(self.seed)
         actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
-        desired = np.array([[ 16.50698631688883822, 36.54846706092654784],
-                            [ 22.67886599981281748, 0.71617561058995771],
-                            [ 65.72798501792723869, 86.84341601437161273]])
+        desired = np.array([[16.50698631688883822, 36.54846706092654784],
+                            [22.67886599981281748, 0.71617561058995771],
+                            [65.72798501792723869, 86.84341601437161273]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=13)

     def test_logseries(self):
         np.random.seed(self.seed)
         actual = np.random.logseries(p=.923456789, size=(3, 2))
-        desired = np.array([[ 2, 2],
-                            [ 6, 17],
-                            [ 3, 6]])
+        desired = np.array([[2, 2],
+                            [6, 17],
+                            [3, 6]])
         np.testing.assert_array_equal(actual, desired)

     def test_multinomial(self):
         np.random.seed(self.seed)
         actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
         desired = np.array([[[4, 3, 5, 4, 2, 2],
-                            [5, 2, 8, 2, 2, 1]],
-                            [[3, 4, 3, 6, 0, 4],
-                            [2, 1, 4, 3, 6, 4]],
-                            [[4, 4, 2, 5, 2, 3],
-                            [4, 3, 4, 2, 3, 4]]])
+                             [5, 2, 8, 2, 2, 1]],
+                            [[3, 4, 3, 6, 0, 4],
+                             [2, 1, 4, 3, 6, 4]],
+                            [[4, 4, 2, 5, 2, 3],
+                             [4, 3, 4, 2, 3, 4]]])
         np.testing.assert_array_equal(actual, desired)

     def test_multivariate_normal(self):
         np.random.seed(self.seed)
-        mean= (.123456789, 10)
+        mean = (.123456789, 10)
         # Hmm... not even symmetric.
         cov = [[1, 0], [1, 0]]
         size = (3, 2)
@@ -470,7 +469,7 @@ class TestRandomDist(TestCase):
                             [[-2.29186329304599745, 10.],
                              [-1.77505606019580053, 10.]],
                             [[-0.54970369430044119, 10.],
-                             [ 0.29768848031692957, 10.]]])
+                             [0.29768848031692957, 10.]]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

         # Check for default size, was raising deprecation warning
@@ -479,50 +478,50 @@ class TestRandomDist(TestCase):
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

         # Check that non positive-semidefinite covariance raises warning
-        mean= [0, 0]
+        mean = [0, 0]
         cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
-        rng = np.random.multivariate_normal
         assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)

     def test_negative_binomial(self):
         np.random.seed(self.seed)
         actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
         desired = np.array([[848, 841],
-                             [892, 611],
-                             [779, 647]])
+                            [892, 611],
+                            [779, 647]])
         np.testing.assert_array_equal(actual, desired)

     def test_noncentral_chisquare(self):
         np.random.seed(self.seed)
-        actual = np.random.noncentral_chisquare(df = 5, nonc = 5, size = (3, 2))
-        desired = np.array([[ 23.91905354498517511, 13.35324692733826346],
-                            [ 31.22452661329736401, 16.60047399466177254],
-                            [ 5.03461598262724586, 17.94973089023519464]])
+        actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+        desired = np.array([[23.91905354498517511, 13.35324692733826346],
+                            [31.22452661329736401, 16.60047399466177254],
+                            [5.03461598262724586, 17.94973089023519464]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_noncentral_f(self):
         np.random.seed(self.seed)
-        actual = np.random.noncentral_f(dfnum = 5, dfden = 2, nonc = 1,
-                                        size = (3, 2))
-        desired = np.array([[ 1.40598099674926669, 0.34207973179285761],
-                            [ 3.57715069265772545, 7.92632662577829805],
-                            [ 0.43741599463544162, 1.1774208752428319 ]])
+        actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+                                        size=(3, 2))
+        desired = np.array([[1.40598099674926669, 0.34207973179285761],
+                            [3.57715069265772545, 7.92632662577829805],
+                            [0.43741599463544162, 1.1774208752428319]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_normal(self):
         np.random.seed(self.seed)
-        actual = np.random.normal(loc = .123456789, scale = 2.0, size = (3, 2))
-        desired = np.array([[ 2.80378370443726244, 3.59863924443872163],
-                            [ 3.121433477601256, -0.33382987590723379],
-                            [ 4.18552478636557357, 4.46410668111310471]])
+        actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[2.80378370443726244, 3.59863924443872163],
+                            [3.121433477601256, -0.33382987590723379],
+                            [4.18552478636557357, 4.46410668111310471]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_pareto(self):
         np.random.seed(self.seed)
-        actual = np.random.pareto(a =.123456789, size = (3, 2))
-        desired = np.array([[ 2.46852460439034849e+03, 1.41286880810518346e+03],
-                            [ 5.28287797029485181e+07, 6.57720981047328785e+07],
-                            [ 1.40840323350391515e+02, 1.98390255135251704e+05]])
+        actual = np.random.pareto(a=.123456789, size=(3, 2))
+        desired = np.array(
+            [[2.46852460439034849e+03, 1.41286880810518346e+03],
+             [5.28287797029485181e+07, 6.57720981047328785e+07],
+             [1.40840323350391515e+02, 1.98390255135251704e+05]])
         # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
         # matrix differs by 24 nulps. Discussion:
         #   http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
@@ -533,7 +532,7 @@ class TestRandomDist(TestCase):

     def test_poisson(self):
         np.random.seed(self.seed)
-        actual = np.random.poisson(lam = .123456789, size=(3, 2))
+        actual = np.random.poisson(lam=.123456789, size=(3, 2))
         desired = np.array([[0, 0],
                             [1, 0],
                             [0, 0]])
@@ -549,84 +548,95 @@ class TestRandomDist(TestCase):

     def test_power(self):
         np.random.seed(self.seed)
-        actual = np.random.power(a =.123456789, size = (3, 2))
-        desired = np.array([[ 0.02048932883240791, 0.01424192241128213],
-                            [ 0.38446073748535298, 0.39499689943484395],
-                            [ 0.00177699707563439, 0.13115505880863756]])
+        actual = np.random.power(a=.123456789, size=(3, 2))
+        desired = np.array([[0.02048932883240791, 0.01424192241128213],
+                            [0.38446073748535298, 0.39499689943484395],
+                            [0.00177699707563439, 0.13115505880863756]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_rayleigh(self):
         np.random.seed(self.seed)
-        actual = np.random.rayleigh(scale = 10, size = (3, 2))
-        desired = np.array([[ 13.8882496494248393, 13.383318339044731 ],
-                            [ 20.95413364294492098, 21.08285015800712614],
-                            [ 11.06066537006854311, 17.35468505778271009]])
+        actual = np.random.rayleigh(scale=10, size=(3, 2))
+        desired = np.array([[13.8882496494248393, 13.383318339044731],
+                            [20.95413364294492098, 21.08285015800712614],
+                            [11.06066537006854311, 17.35468505778271009]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_standard_cauchy(self):
         np.random.seed(self.seed)
-        actual = np.random.standard_cauchy(size = (3, 2))
-        desired = np.array([[ 0.77127660196445336, -6.55601161955910605],
-                            [ 0.93582023391158309, -2.07479293013759447],
-                            [-4.74601644297011926, 0.18338989290760804]])
+        actual = np.random.standard_cauchy(size=(3, 2))
+        desired = np.array([[0.77127660196445336, -6.55601161955910605],
+                            [0.93582023391158309, -2.07479293013759447],
+                            [-4.74601644297011926, 0.18338989290760804]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_standard_exponential(self):
         np.random.seed(self.seed)
-        actual = np.random.standard_exponential(size = (3, 2))
-        desired = np.array([[ 0.96441739162374596, 0.89556604882105506],
-                            [ 2.1953785836319808, 2.22243285392490542],
-                            [ 0.6116915921431676, 1.50592546727413201]])
+        actual = np.random.standard_exponential(size=(3, 2))
+        desired = np.array([[0.96441739162374596, 0.89556604882105506],
+                            [2.1953785836319808, 2.22243285392490542],
+                            [0.6116915921431676, 1.50592546727413201]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_standard_gamma(self):
         np.random.seed(self.seed)
-        actual = np.random.standard_gamma(shape = 3, size = (3, 2))
-        desired = np.array([[ 5.50841531318455058, 6.62953470301903103],
-                            [ 5.93988484943779227, 2.31044849402133989],
-                            [ 7.54838614231317084, 8.012756093271868 ]])
+        actual = np.random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[5.50841531318455058, 6.62953470301903103],
+                            [5.93988484943779227, 2.31044849402133989],
+                            [7.54838614231317084, 8.012756093271868]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_standard_normal(self):
         np.random.seed(self.seed)
-        actual = np.random.standard_normal(size = (3, 2))
-        desired = np.array([[ 1.34016345771863121, 1.73759122771936081],
-                            [ 1.498988344300628, -0.2286433324536169 ],
-                            [ 2.031033998682787, 2.17032494605655257]])
+        actual = np.random.standard_normal(size=(3, 2))
+        desired = np.array([[1.34016345771863121, 1.73759122771936081],
+                            [1.498988344300628, -0.2286433324536169],
+                            [2.031033998682787, 2.17032494605655257]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_standard_t(self):
         np.random.seed(self.seed)
-        actual = np.random.standard_t(df = 10, size = (3, 2))
-        desired = np.array([[ 0.97140611862659965, -0.08830486548450577],
-                            [ 1.36311143689505321, -0.55317463909867071],
-                            [-0.18473749069684214, 0.61181537341755321]])
+        actual = np.random.standard_t(df=10, size=(3, 2))
+        desired = np.array([[0.97140611862659965, -0.08830486548450577],
+                            [1.36311143689505321, -0.55317463909867071],
+                            [-0.18473749069684214, 0.61181537341755321]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_triangular(self):
         np.random.seed(self.seed)
-        actual = np.random.triangular(left = 5.12, mode = 10.23, right = 20.34,
-                                      size = (3, 2))
-        desired = np.array([[ 12.68117178949215784, 12.4129206149193152 ],
-                            [ 16.20131377335158263, 16.25692138747600524],
-                            [ 11.20400690911820263, 14.4978144835829923 ]])
+        actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
+                                      size=(3, 2))
+        desired = np.array([[12.68117178949215784, 12.4129206149193152],
+                            [16.20131377335158263, 16.25692138747600524],
+                            [11.20400690911820263, 14.4978144835829923]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_uniform(self):
         np.random.seed(self.seed)
-        actual = np.random.uniform(low = 1.23, high=10.54, size = (3, 2))
-        desired = np.array([[ 6.99097932346268003, 6.73801597444323974],
-                            [ 9.50364421400426274, 9.53130618907631089],
-                            [ 5.48995325769805476, 8.47493103280052118]])
+        actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
+        desired = np.array([[6.99097932346268003, 6.73801597444323974],
+                            [9.50364421400426274, 9.53130618907631089],
+                            [5.48995325769805476, 8.47493103280052118]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

+    def test_uniform_range_bounds(self):
+        fmin = np.finfo('float').min
+        fmax = np.finfo('float').max
+
+        func = np.random.uniform
+        np.testing.assert_raises(OverflowError, func, -np.inf, 0)
+        np.testing.assert_raises(OverflowError, func, 0, np.inf)
+        np.testing.assert_raises(OverflowError, func, fmin, fmax)
+
+        # (fmax / 1e17) - fmin is within range, so this should not throw
+        np.random.uniform(low=fmin, high=fmax / 1e17)
+
     def test_vonmises(self):
         np.random.seed(self.seed)
-        actual = np.random.vonmises(mu = 1.23, kappa = 1.54, size = (3, 2))
-        desired = np.array([[ 2.28567572673902042, 2.89163838442285037],
-                            [ 0.38198375564286025, 2.57638023113890746],
-                            [ 1.19153771588353052, 1.83509849681825354]])
+        actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+        desired = np.array([[2.28567572673902042, 2.89163838442285037],
+                            [0.38198375564286025, 2.57638023113890746],
+                            [1.19153771588353052, 1.83509849681825354]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_vonmises_small(self):
@@ -637,31 +647,31 @@ class TestRandomDist(TestCase):
     def test_wald(self):
         np.random.seed(self.seed)
-        actual = np.random.wald(mean = 1.23, scale = 1.54, size = (3, 2))
-        desired = np.array([[ 3.82935265715889983, 5.13125249184285526],
-                            [ 0.35045403618358717, 1.50832396872003538],
-                            [ 0.24124319895843183, 0.22031101461955038]])
+        actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
+        desired = np.array([[3.82935265715889983, 5.13125249184285526],
+                            [0.35045403618358717, 1.50832396872003538],
+                            [0.24124319895843183, 0.22031101461955038]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=14)

     def test_weibull(self):
         np.random.seed(self.seed)
-        actual = np.random.weibull(a = 1.23, size = (3, 2))
-        desired = np.array([[ 0.97097342648766727, 0.91422896443565516],
-                            [ 1.89517770034962929, 1.91414357960479564],
-                            [ 0.67057783752390987, 1.39494046635066793]])
+        actual = np.random.weibull(a=1.23, size=(3, 2))
+        desired = np.array([[0.97097342648766727, 0.91422896443565516],
+                            [1.89517770034962929, 1.91414357960479564],
+                            [0.67057783752390987, 1.39494046635066793]])
         np.testing.assert_array_almost_equal(actual, desired, decimal=15)

     def test_zipf(self):
         np.random.seed(self.seed)
-        actual = np.random.zipf(a = 1.23, size = (3, 2))
+        actual = np.random.zipf(a=1.23, size=(3, 2))
         desired = np.array([[66, 29],
-                            [ 1, 1],
-                            [ 3, 13]])
+                            [1, 1],
+                            [3, 13]])
         np.testing.assert_array_equal(actual, desired)

-class TestThread:
-    """ make sure each state produces the same sequence even in threads """
+class TestThread(object):
+    # make sure each state produces the same sequence even in threads
     def setUp(self):
         self.seeds = range(4)
@@ -681,7 +691,11 @@ class TestThread:
         for s, o in zip(self.seeds, out2):
             function(np.random.RandomState(s), o)
-        np.testing.assert_array_equal(out1, out2)
+        # these platforms change x87 fpu precision mode in threads
+        if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
+            np.testing.assert_array_almost_equal(out1, out2)
+        else:
+            np.testing.assert_array_equal(out1, out2)

     def test_normal(self):
         def gen_random(state, out):
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 1bba5d91d..ccffd033e 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -44,7 +44,7 @@ class TestRegression(TestCase):
         b = np.random.permutation(long(12))
         assert_array_equal(a, b)

-    def test_randint_range(self) :
+    def test_randint_range(self):
         # Test for ticket #1690
         lmax = np.iinfo('l').max
         lmin = np.iinfo('l').min
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index a06d559e7..85e9c0f0e 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -90,7 +90,7 @@ def run_module_suite(file_to_run=None, argv=None):
     argv: list of strings
         Arguments to be passed to the nose test runner. ``argv[0]`` is
         ignored. All command line arguments accepted by ``nosetests``
-        will work.
+        will work. If ``argv`` is None (the default), ``sys.argv`` is used.

         .. versionadded:: 1.9.0
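(An aside on the ``run_module_suite`` change continued below: a minimal sketch of the pattern it improves — the module name is illustrative and nose is assumed installed. A test module ending in

    if __name__ == "__main__":
        from numpy.testing import run_module_suite
        run_module_suite()

can now be invoked as ``python test_foo.py --verbose`` and the extra command line options reach nose, since ``sys.argv`` is used whenever ``argv`` is None.)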
@@ -117,7 +117,7 @@ def run_module_suite(file_to_run=None, argv=None):
         raise AssertionError

     if argv is None:
-        argv = ['', file_to_run]
+        argv = sys.argv + [file_to_run]
     else:
         argv = argv + [file_to_run]
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index aa0a2669f..34608125d 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -244,6 +244,14 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
         self.assertRaises(AssertionError, lambda : self._assert_func(a, b))

+    def test_subclass(self):
+        a = np.array([[1., 2.], [3., 4.]])
+        b = np.ma.masked_array([[1., 2.], [0., 4.]],
+                               [[False, False], [True, False]])
+        assert_array_almost_equal(a, b)
+        assert_array_almost_equal(b, a)
+        assert_array_almost_equal(b, b)
+
 class TestAlmostEqual(_GenericTest, unittest.TestCase):
     def setUp(self):
         self._assert_func = assert_almost_equal
@@ -449,6 +457,15 @@ class TestAssertAllclose(unittest.TestCase):
         # Should not raise:
         assert_allclose(a, a)

+    def test_report_fail_percentage(self):
+        a = np.array([1, 1, 1, 1])
+        b = np.array([1, 1, 1, 2])
+        try:
+            assert_allclose(a, b)
+            msg = ''
+        except AssertionError as exc:
+            msg = exc.args[0]
+        self.assertTrue("mismatch 25.0%" in msg)

 class TestArrayAlmostEqualNulp(unittest.TestCase):
     @dec.knownfailureif(True, "Github issue #347")
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index ddf21e2bc..3b20f9238 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -10,6 +10,9 @@ import re
 import operator
 import warnings
 from functools import partial
+import shutil
+import contextlib
+from tempfile import mkdtemp

 from .nosetester import import_nose
 from numpy.core import float32, empty, arange, array_repr, ndarray
@@ -156,7 +159,7 @@ else:
         """ Return memory usage of running python. [Not implemented]"""
         raise NotImplementedError

-if os.name=='nt' and sys.version[:3] > '2.3':
+if os.name=='nt':
     # Code "stolen" from enthought/debug/memusage.py
     def GetPerformanceAttributes(object, counter, instance = None,
                                  inum=-1, format = None, machine=None):
@@ -219,7 +222,7 @@ def build_err_msg(arrays, err_msg, header='Items are not equal:',

 def assert_equal(actual,desired,err_msg='',verbose=True):
     """
-    Raise an assertion if two objects are not equal.
+    Raises an AssertionError if two objects are not equal.

     Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
     check that all elements of these objects are equal. An exception is raised
@@ -371,7 +374,8 @@ def print_assert_equal(test_string, actual, desired):

 def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
     """
-    Raise an assertion if two items are not equal up to desired precision.
+    Raises an AssertionError if two items are not equal up to desired
+    precision.

     .. note:: It is recommended to use one of `assert_allclose`,
               `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
@@ -488,7 +492,8 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):

 def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
     """
-    Raise an assertion if two items are not equal up to significant digits.
+    Raises an AssertionError if two items are not equal up to significant
+    digits.

     .. note:: It is recommended to use one of `assert_allclose`,
               `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
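(The ``mismatch 25.0%`` report exercised by ``test_report_fail_percentage`` above can be reproduced directly; a minimal sketch:

    import numpy as np
    from numpy.testing import assert_allclose

    try:
        assert_allclose(np.array([1, 1, 1, 1]), np.array([1, 1, 1, 2]))
    except AssertionError as exc:
        print(exc.args[0])  # the failure report now includes "mismatch 25.0%"

One element of four differs, hence the 25.0% figure.)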
@@ -669,7 +674,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,

 def assert_array_equal(x, y, err_msg='', verbose=True):
     """
-    Raise an assertion if two array_like objects are not equal.
+    Raises an AssertionError if two array_like objects are not equal.

     Given two array_like objects, check that the shape is equal and all
     elements of these objects are equal. An exception is raised at
@@ -735,7 +740,8 @@ def assert_array_equal(x, y, err_msg='', verbose=True):

 def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
     """
-    Raise an assertion if two objects are not equal up to desired precision.
+    Raises an AssertionError if two objects are not equal up to desired
+    precision.

     .. note:: It is recommended to use one of `assert_allclose`,
               `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
@@ -823,7 +829,7 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
     # make sure y is an inexact type to avoid abs(MIN_INT); will cause
     # casting of x later.
     dtype = result_type(y, 1.)
-    y = array(y, dtype=dtype, copy=False)
+    y = array(y, dtype=dtype, copy=False, subok=True)
     z = abs(x-y)

     if not issubdtype(z.dtype, number):
@@ -838,7 +844,8 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):

 def assert_array_less(x, y, err_msg='', verbose=True):
     """
-    Raise an assertion if two array_like objects are not ordered by less than.
+    Raises an AssertionError if two array_like objects are not ordered by less
+    than.

     Given two array_like objects, check that the shape is equal and all
     elements of the first object are strictly smaller than those of the
@@ -1240,7 +1247,8 @@ def _assert_valid_refcount(op):
 def assert_allclose(actual, desired, rtol=1e-7, atol=0,
                     err_msg='', verbose=True):
     """
-    Raise an assertion if two objects are not equal up to desired tolerance.
+    Raises an AssertionError if two objects are not equal up to desired
+    tolerance.

     The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
     It compares the difference between `actual` and `desired` to
@@ -1281,7 +1289,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0,
     """
     import numpy as np
     def compare(x, y):
-        return np.allclose(x, y, rtol=rtol, atol=atol)
+        return np.core.numeric._allclose_points(x, y, rtol=rtol, atol=atol)

     actual, desired = np.asanyarray(actual), np.asanyarray(desired)
     header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
@@ -1692,3 +1700,16 @@ def _gen_alignment_data(dtype=float32, type='binary', max_size=24):

 class IgnoreException(Exception):
     "Ignoring this exception due to disabled feature"
+
+
+@contextlib.contextmanager
+def tempdir(*args, **kwargs):
+    """Context manager to provide a temporary test folder.
+
+    All arguments are passed as is to the underlying tempfile.mkdtemp
+    function.
+
+    """
+    tmpdir = mkdtemp(*args, **kwargs)
+    yield tmpdir
+    shutil.rmtree(tmpdir)
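(A sketch of the intended use of the ``tempdir`` helper just added — the test body is hypothetical:

    import os
    import numpy as np
    from numpy.testing.utils import tempdir

    with tempdir(prefix='numpy-test-') as folder:
        fname = os.path.join(folder, 'data.npy')
        np.save(fname, np.arange(4))
        assert np.load(fname)[3] == 3
    # the folder and its contents have been removed here

Any arguments, such as ``prefix`` above, go straight to ``tempfile.mkdtemp``. Note the cleanup is not wrapped in try/finally, so as written the directory is only removed when the block exits without raising.)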
+ + """ + tmpdir = mkdtemp(*args, **kwargs) + yield tmpdir + shutil.rmtree(tmpdir) diff --git a/pavement.py b/pavement.py index c0b5cb2d4..9e3cb065c 100644 --- a/pavement.py +++ b/pavement.py @@ -99,10 +99,10 @@ finally: #----------------------------------- # Source of the release notes -RELEASE_NOTES = 'doc/release/1.9.0-notes.rst' +RELEASE_NOTES = 'doc/release/1.10.0-notes.rst' # Start/end of the log (from git) -LOG_START = 'v1.8.0b1' +LOG_START = 'v1.9.0b1' LOG_END = 'master' @@ -48,7 +48,7 @@ Operating System :: MacOS """ MAJOR = 1 -MINOR = 9 +MINOR = 10 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) diff --git a/site.cfg.example b/site.cfg.example index 714ab6311..4a59f10e2 100644 --- a/site.cfg.example +++ b/site.cfg.example @@ -126,7 +126,7 @@ # better performance. Note that the AMD library has nothing to do with AMD # (Advanced Micro Devices), the CPU company. # -# UMFPACK is not needed for numpy or scipy. +# UMFPACK is not used by numpy. # # http://www.cise.ufl.edu/research/sparse/umfpack/ # http://www.cise.ufl.edu/research/sparse/amd/ @@ -141,7 +141,7 @@ # FFT libraries # ------------- # There are two FFT libraries that we can configure here: FFTW (2 and 3) and djbfft. -# Note that these libraries are not needed for numpy or scipy. +# Note that these libraries are not used by for numpy or scipy. # # http://fftw.org/ # http://cr.yp.to/djbfft.html diff --git a/tools/allocation_tracking/track_allocations.py b/tools/allocation_tracking/track_allocations.py index 2006217c2..dfc354eb5 100644 --- a/tools/allocation_tracking/track_allocations.py +++ b/tools/allocation_tracking/track_allocations.py @@ -1,6 +1,7 @@ from __future__ import division, absolute_import, print_function import numpy as np +import gc import inspect from alloc_hook import NumpyAllocHook @@ -35,12 +36,21 @@ class AllocationTracker(object): self.numpy_hook.__exit__() def hook(self, inptr, outptr, size): + # minimize the chances that the garbage collector kicks in during a + # cython __dealloc__ call and causes a double delete of the current + # object. To avoid this fully the hook would have to avoid all python + # api calls, e.g. 
+        gc_on = gc.isenabled()
+        gc.disable()
         if outptr == 0:  # it's a free
             self.free_cb(inptr)
         elif inptr != 0:  # realloc
             self.realloc_cb(inptr, outptr, size)
         else:  # malloc
             self.alloc_cb(outptr, size)
+        if gc_on:
+            gc.enable()

     def alloc_cb(self, ptr, size):
         if size >= self.threshold:
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index e250e78bf..217acd5bf 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -872,7 +872,7 @@
   (PyArrayObject* array=NULL, int is_new_object=0)
 {
   npy_intp size[2] = { -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
+  array = obj_to_array_fortran_allow_conversion($input,
                                                    DATA_TYPECODE,
                                                    &is_new_object);
   if (!array || !require_dimensions(array, 2) ||
@@ -1106,7 +1106,7 @@
   (PyArrayObject* array=NULL, int is_new_object=0)
 {
   npy_intp size[3] = { -1, -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
+  array = obj_to_array_fortran_allow_conversion($input,
                                                    DATA_TYPECODE,
                                                    &is_new_object);
   if (!array || !require_dimensions(array, 3) ||
@@ -1345,7 +1345,7 @@
   (PyArrayObject* array=NULL, int is_new_object=0)
 {
   npy_intp size[4] = { -1, -1, -1 , -1 };
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+  array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE,
                                                    &is_new_object);
   if (!array || !require_dimensions(array, 4) ||
       !require_size(array, size, 4) || !require_fortran(array)) SWIG_fail;
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
index 0174e0708..26a50b2fa 100644
--- a/tools/test-installed-numpy.py
+++ b/tools/test-installed-numpy.py
@@ -38,7 +38,7 @@ import numpy

 # Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
 # The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0"):
+if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
     if not numpy.ones((10, 1), order='C').flags.f_contiguous:
         print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
         sys.exit(1)
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index de078edf7..3981c3b58 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -1,6 +1,9 @@
 #!/bin/sh
 set -ex

+# travis boxes give you 1.5 cpus
+export NPY_NUM_BUILD_JOBS=2
+
 # setup env
 if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then
     # much faster package installation
@@ -59,9 +62,9 @@ setup_bento()
     cd ..

     # Waf
-    wget http://waf.googlecode.com/files/waf-1.7.13.tar.bz2
-    tar xjvf waf-1.7.13.tar.bz2
-    cd waf-1.7.13
+    wget http://ftp.waf.io/pub/release/waf-1.7.16.tar.bz2
+    tar xjvf waf-1.7.16.tar.bz2
+    cd waf-1.7.16
     python waf-light
     export WAFDIR=$PWD
     cd ..
diff --git a/tox.ini b/tox.ini
@@ -25,7 +25,7 @@
 # installed and that they can be run as 'python2.7', 'python3.3', etc.
 [tox]
-envlist = py26,py27,py32,py33,py27-monolithic,py33-monolithic,py27-relaxed-strides,py33-relaxed-strides
+envlist = py26,py27,py32,py33,py27-monolithic,py33-monolithic,py27-not-relaxed-strides,py33-not-relaxed-strides

 [testenv]
 deps=
@@ -41,13 +41,13 @@ env=NPY_SEPARATE_COMPILATION=0
 basepython=python3.3
 env=NPY_SEPARATE_COMPILATION=0

-[testenv:py27-relaxed-strides]
+[testenv:py27-not-relaxed-strides]
 basepython=python2.7
-env=NPY_RELAXED_STRIDES_CHECKING=1
+env=NPY_RELAXED_STRIDES_CHECKING=0

-[testenv:py33-relaxed-strides]
+[testenv:py33-not-relaxed-strides]
 basepython=python3.3
-env=NPY_RELAXED_STRIDES_CHECKING=1
+env=NPY_RELAXED_STRIDES_CHECKING=0

 # Not run by default. Set up the way you want, then use 'tox -e debug'
 # if you want it:
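(For context on the flipped defaults above: relaxed strides checking is now treated as active unless NPY_RELAXED_STRIDES_CHECKING is explicitly set to 0, and the tox environments were renamed to match, e.g. ``tox -e py27-not-relaxed-strides``. A small sketch of the observable difference, mirroring the flags check in ``tools/test-installed-numpy.py``:

    import numpy as np

    # On a build with relaxed strides checking active (the new default),
    # a C-contiguous column vector is also flagged Fortran-contiguous,
    # because its strides are compatible with either memory order.
    a = np.ones((10, 1), order='C')
    print(a.flags.f_contiguous)  # True on relaxed-strides builds

)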