author | Travis Oliphant <oliphant@enthought.com> | 2009-08-28 15:36:42 +0000
committer | Travis Oliphant <oliphant@enthought.com> | 2009-08-28 15:36:42 +0000
commit | 2b01ee6b966b9ca298247e6717e3a5be16a92970 (patch)
tree | 6bbb8ee8eebdfe2ef3eb26f13994193b313c6fe7
parent | c2191bc97da8a0879cec8d3e9a7a93fe9e66fcd8 (diff)
parent | fddd4b9c3b8f18ba7cf386f766b70ec3328b1c69 (diff)
download | numpy-2b01ee6b966b9ca298247e6717e3a5be16a92970.tar.gz
Re-base the date-time branch back to the trunk.
141 files changed, 10894 insertions, 3098 deletions
diff --git a/doc/HOWTO_BUILD_DOCS.txt b/doc/HOWTO_BUILD_DOCS.txt
index fd058d9bd..8c1722712 100644
--- a/doc/HOWTO_BUILD_DOCS.txt
+++ b/doc/HOWTO_BUILD_DOCS.txt
@@ -2,70 +2,97 @@
 Building the NumPy API and reference docs
 =========================================
-Using Sphinx_
--------------
-`Download <https://code.launchpad.net/~stefanv/scipy/numpy-refguide>`_
-the builder. Follow the instructions in ``README.txt``.
+We currently use Sphinx_ for generating the API and reference
+documentation for Numpy. You will need Sphinx 0.5 or newer. Sphinx's
+current development version also works as of now (2009-06-24), and
+using it is recommended though not required.
+If you only want to get the documentation, note that pre-built
+versions can be found at
-Using Epydoc_
--------------
+    http://docs.scipy.org/
-Currently, we recommend that you build epydoc from the trunk::
+in several different formats.
-   svn co https://epydoc.svn.sf.net/svnroot/epydoc/trunk/epydoc epydoc
-   cd epydoc/src
-   sudo python setup.py install
+.. _Sphinx: http://sphinx.pocoo.org
-The appearance of some elements can be changed in the epydoc.css
-style sheet.
-Emphasized text appearance can be controlled by the definition of the <em>
-tag. For instance, to make them bold, insert::
+Instructions
+------------
-   em {font-weight: bold;}
+Since large parts of the main documentation are stored in
+docstrings, you will need to first build Numpy, and install it so
+that the correct version is imported by
-The variables' types are in a span of class rst-classifier, hence can be
-changed by inserting something like::
+    >>> import numpy
-   span.rst-classifier {font-weight: normal;}
+Note that you can, e.g., install Numpy to a temporary location and set
+the PYTHONPATH environment variable appropriately. Also note that if
+you have a system Numpy installed via Python eggs, you will also need
+to use ``setupegg.py`` to install the temporary Numpy.
-The first line of the signature should **not** copy the signature unless
-the function is written in C, in which case it is mandatory. If the function
-signature is generic (uses ``*args`` or ``**kwds``), then a function signature
-may be included.
+After Numpy is installed, write::
-Use optional in the "type" field for parameters that are non-keyword
-optional for C-functions.
+    make html
-Epydoc depends on Docutils for reStructuredText parsing. You can
-download Docutils from the `Docutils sourceforge
-page. <http://docutils.sourceforge.net/>`_. The version in SVN is
-broken, so use 0.4 or the patched version from Debian. You may also
-be able to use a package manager like yum to install it::
+in this ``doc/`` directory. If all goes well, this will generate a
+``build/html`` subdirectory containing the built documentation. Note
+that building the documentation on Windows is currently not actively
+supported, though it should be possible. (See Sphinx_ documentation
+for more information.)
-   $ sudo yum install python-docutils
+To build the PDF documentation, do instead::
+    make latex
+    make -C build/latex all-pdf
-Example
--------
-Here is a short example module,
-`plain text <http://svn.scipy.org/svn/numpy/trunk/doc/example.py>`_
-or
-`rendered <http://www.scipy.org/doc/example>`_ in HTML.
+You will need to have LaTeX installed for this.
-To try this yourself, simply download the example.py::
+In addition to the above, you can also do::
-   svn co http://svn.scipy.org/svn/numpy/trunk/doc/example.py .
+   make dist
-Then, run epydoc::
+which will rebuild Numpy, install it to a temporary location, and
+build the documentation in all formats. This will most likely again
+only work on Unix platforms.
-   $ epydoc --docformat=restructuredtext example.py
-The output is placed in ``./html``, and may be viewed by loading the
-``index.html`` file into your browser.
+Sphinx extensions
+-----------------
+Numpy's documentation uses several custom extensions to Sphinx. These
+are shipped in the ``sphinxext/`` directory, and are automatically
+enabled when building Numpy's documentation.
+However, if you want to make use of these extensions in third-party
+projects, they are available on PyPI_ as the numpydoc_ package, and
+can be installed with::
-.. _epydoc: http://epydoc.sourceforge.net/
-.. _sphinx: http://sphinx.pocoo.org
+   easy_install numpydoc
+
+In addition, you will need to add::
+
+   extensions = ['numpydoc']
+
+to ``conf.py`` in your Sphinx documentation.
+
+The following extensions are available:
+
+  - ``numpydoc``: support for Numpy docstring format in Sphinx.
+
+  - ``numpydoc.only_directives``: (DEPRECATED)
+
+  - ``numpydoc.plot_directives``: Adaptation of Matplotlib's ``plot::``
+    directive. Note that this implementation may still undergo severe
+    changes or be eventually deprecated.
+
+  - ``numpydoc.autosummary``: (DEPRECATED) An ``autosummary::`` directive.
+    Available in Sphinx 0.6.2 and (to-be) 1.0 as ``sphinx.ext.autosummary``,
+    and the Sphinx 1.0 version is recommended over the one included in
+    Numpydoc.
+
+  - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
+
+
+.. _PyPI: http://python.org/pypi
+.. _numpydoc: http://python.org/pypi/numpydoc
diff --git a/doc/HOWTO_DOCUMENT.txt b/doc/HOWTO_DOCUMENT.txt
index 1cea22f1f..efe731ff7 100644
--- a/doc/HOWTO_DOCUMENT.txt
+++ b/doc/HOWTO_DOCUMENT.txt
@@ -61,12 +61,12 @@ docstrings that provides for consistency, while also allowing our
 toolchain to produce well-formatted reference guides. This document
 describes the current community consensus for such a standard. If you
 have suggestions for improvements, post them on the `numpy-discussion
-list`_, together with the epydoc output.
+list`_.
 
 Our docstring standard uses `re-structured text (reST)
 <http://docutils.sourceforge.net/rst.html>`_ syntax and is rendered
-using tools like epydoc_ or sphinx_ (pre-processors that understand
-the particular documentation style we are using). While a rich set of
+using Sphinx_ (a pre-processor that understands the particular
+documentation style we are using). While a rich set of
 markup is available, we limit ourselves to a very basic subset, in
 order to provide docstrings that are easy to read on text-only
 terminals.
@@ -74,15 +74,18 @@ terminals.
 
 A guiding principle is that human readers of the text are given
 precedence over contorting docstrings so our tools produce nice
 output. Rather than sacrificing the readability of the docstrings, we
-have written pre-processors to assist tools like epydoc_ and sphinx_ in
-their task.
+have written pre-processors to assist Sphinx_ in its task.
+
+The length of docstring lines should be kept to 75 characters to
+facilitate reading the docstrings in text terminals.
 
 Status
 ------
 We are busy converting existing docstrings to the new format,
 expanding them where they are lacking, as well as writing new ones for
 undocumented functions.
Volunteers are welcome to join the effort on -our new documentation system (see the `Developer Zone +our new documentation system (see the `Documentation Editor +<http://docs.scipy.org/doc/>`_ and the `Developer Zone <http://www.scipy.org/Developer_Zone/DocMarathon2008>`_). Sections @@ -96,7 +99,7 @@ The sections of the docstring are: :: - def add(a,b): + def add(a, b): """The sum of two numbers. """ @@ -107,7 +110,7 @@ The sections of the docstring are: it as the first line of the docstring:: """ - add(a,b) + add(a, b) The sum of two numbers. @@ -147,11 +150,17 @@ The sections of the docstring are: over all axes). When a parameter can only assume one of a fixed set of values, - those values can be listed in braces :: + those values can be listed in braces:: x : {True, False} Description of `x`. + When two or more input parameters have exactly the same type, shape and + description, they can be combined:: + + x1, x2 : array_like + Input arrays, description of `x1`, `x2`. + 4. **Returns** Explanation of the returned values and their types, of the same @@ -173,6 +182,9 @@ The sections of the docstring are: LinAlgException If the matrix is not numerically invertible. + This section should be used judiciously, i.e only for errors + that are non-obvious or have a large chance of getting raised. + 7. **See Also** An optional section used to refer to related code. This section @@ -203,7 +215,8 @@ The sections of the docstring are: scipy.random.norm : Random variates, PDFs, etc. - Functions may be listed without descriptions:: + Functions may be listed without descriptions, and this is + preferable if the functionality is clear from the function name:: See Also -------- @@ -286,59 +299,39 @@ The sections of the docstring are: 10. **Examples** - An optional section for examples, using the `doctest - <http://www.python.org/doc/lib/module-doctest.html>`_ format. - This section is meant to illustrate usage, not to provide a - testing framework -- for that, use the ``tests/`` directory. - While optional, this section is very strongly encouraged. You can - run these examples by doing:: - - >>> import doctest - >>> doctest.testfile('example.py') - - or, using nose, - - :: - - $ nosetests --with-doctest example.py + An optional section for examples, using the `doctest + <http://www.python.org/doc/lib/module-doctest.html>`_ format. + This section is meant to illustrate usage, not to provide a + testing framework -- for that, use the ``tests/`` directory. + While optional, this section is very strongly encouraged. - Blank lines are used to seperate doctests. When they occur in the - expected output, they should be replaced by ``<BLANKLINE>`` (see - `doctest options - <http://docs.python.org/lib/doctest-options.html>`_ for other such - special strings), e.g. + When multiple examples are provided, they should be separated by + blank lines. Comments explaining the examples should have blank + lines both above and below them:: - :: + >>> np.add(1, 2) + 3 - >>> print "a\n\nb" - a - <BLANKLINE> - b + Comment explaining the second example - The examples may assume that ``import numpy as np`` is executed before - the example code in *numpy*, and ``import scipy as sp`` in *scipy*. - Additional examples may make use of *matplotlib* for plotting, but should - import it explicitly, e.g., ``import matplotlib.pyplot as plt``. + >>> np.add([1, 2], [3, 4]) + array([4, 6]) -11. **Indexing tags*** + You can run examples using:: - Each function needs to be categorised for indexing purposes. 
Use
-   the ``index`` directive::
+   >>> np.test(doctests=True)
 
-   .. index::
-      :refguide: ufunc, trigonometry
+   It is not necessary to use the doctest markup ``<BLANKLINE>`` to
+   indicate empty lines in the output. Note that the option to run
+   the examples through ``numpy.test`` is provided for checking if the
+   examples work, not for making the examples part of the testing framework.
 
-   To index a function as a sub-category of a class, separate index
-   entries by a colon, e.g.
-
-   ::
-
-      :refguide: ufunc, numpy:reshape, other
-
-   A `list of available categories
-   <http://www.scipy.org/Developer_Zone/ReferenceGuide>`_ is
-   available.
+   The examples may assume that ``import numpy as np`` is executed before
+   the example code in *numpy*, and ``import scipy as sp`` in *scipy*.
+   Additional examples may make use of *matplotlib* for plotting, but should
+   import it explicitly, e.g., ``import matplotlib.pyplot as plt``.
+
 
 Documenting classes
 -------------------
@@ -346,9 +339,11 @@ Class docstring
 ```````````````
 Use the same sections as outlined above (all except ``Returns`` are
 applicable). The constructor (``__init__``) should also be documented
-here.
+here; the **parameters** section of the docstring details the constructor's
+parameters.
 
-An ``Attributes`` section may be used to describe class variables::
+An ``Attributes`` section, located below the **parameters** section,
+may be used to describe class variables::
 
   Attributes
   ----------
   x : float
      The X coordinate.
@@ -357,6 +352,18 @@ An ``Attributes`` section may be used to describe class variables::
   y : float
      The Y coordinate.
 
+Attributes that are properties and have their own docstrings can be
+simply listed by name::
+
+  Attributes
+  ----------
+  real
+  imag
+  x : float
+      The X coordinate.
+  y : float
+      The Y coordinate.
+
 In general, it is not necessary to list class methods. Those that are
 not part of the public API have names that start with an underscore.
 In some cases, however, a class may have a great many methods, of
@@ -383,6 +390,10 @@ becomes useful to have an additional ``Methods`` section::
 
      """
 
+If it is necessary to explain a private method (use with care!), it can
+be referred to in the **extended summary** or the **notes**. Do not
+list private methods in the Methods section.
+
 Note that `self` is *not* listed as the first parameter of methods.
 
 Method docstrings
@@ -390,6 +401,60 @@ Method docstrings
 `````````````````
 Document these as you would any other function. Do not include
 ``self`` in the list of parameters.
 
+Documenting class instances
+---------------------------
+Instances of classes that are part of the Numpy API (for example `np.r_`,
+`np.c_`, `np.index_exp`, etc.) may require some care. To give these
+instances a useful docstring, we do the following:
+
+* Single instance: If only a single instance of a class is exposed,
+  document the class. Examples can use the instance name.
+
+* Multiple instances: If multiple instances are exposed, docstrings
+  for each instance are written and assigned to the instances'
+  ``__doc__`` attributes at run time. The class is documented as usual, and
+  the exposed instances can be mentioned in the Notes and See Also sections.
+
+Documenting constants
+---------------------
+Use the same sections as outlined for functions where applicable::
+
+    1. summary
+    2. extended summary (optional)
+    3. see also (optional)
+    4. references (optional)
+    5. 
examples (optional) + +Docstrings for constants will not be visible in text terminals +(constants are of immutable type, so docstrings can not be assigned +to them like for for class instances), but will appear in the +documentation built with Sphinx. + +Other points to keep in mind +---------------------------- +* Notes and Warnings : If there are points in the docstring that deserve + special emphasis, the reST directives for a note or warning can be used + in the vicinity of the context of the warning (inside a section). Syntax: + + :: + + .. warning:: Warning text. + + .. note:: Note text. + + Use these sparingly, as they do not look very good in text terminals + and are not often necessary. One situation in which a warning can + be useful is for marking a known bug that is not yet fixed. + +* Questions and Answers : For general questions on how to write docstrings + that are not answered in this document, refer to + `<http://docs.scipy.org/numpy/Questions+Answers/>`_. + +* array_like : For functions that take arguments which can have not only + a type `ndarray`, but also types that can be converted to an ndarray + (i.e. scalar types, sequence types), those arguments can be documented + with type `array_like`. + Common reST concepts -------------------- For paragraphs, indentation is significant and indicates indentation in the @@ -412,12 +477,11 @@ followed. Conclusion ---------- -`An example -<http://svn.scipy.org/svn/numpy/trunk/doc/example.py>`_ of the +`An example <http://svn.scipy.org/svn/numpy/trunk/doc/example.py>`_ of the format shown here is available. Refer to `How to Build API/Reference Documentation <http://svn.scipy.org/svn/numpy/trunk/doc/HOWTO_BUILD_DOCS.txt>`_ -on how to use epydoc_ or sphinx_ to construct a manual and web page. +on how to use Sphinx_ to build the manual. This document itself was written in ReStructuredText, and may be converted to HTML using:: @@ -426,5 +490,4 @@ HTML using:: .. _SciPy: http://www.scipy.org .. _numpy-discussion list: http://www.scipy.org/Mailing_Lists -.. _epydoc: http://epydoc.sourceforge.net/ -.. _sphinx: http://sphinx.pocoo.org +.. _Sphinx: http://sphinx.pocoo.org diff --git a/doc/Makefile b/doc/Makefile index 82dcc741c..24959750a 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -132,6 +132,10 @@ htmlhelp-build: htmlhelp build/htmlhelp/numpy.chm %.chm: %.hhp -hhc.exe $^ +qthelp: generate + mkdir -p build/qthelp build/doctrees + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp + latex: generate mkdir -p build/latex build/doctrees $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex diff --git a/doc/README.txt b/doc/README.txt deleted file mode 100644 index 2a7ad82ee..000000000 --- a/doc/README.txt +++ /dev/null @@ -1,40 +0,0 @@ -NumPy Reference Guide -===================== - -Instructions ------------- -1. Optionally download an XML dump of the newest docstrings from the doc wiki - at ``/pydocweb/dump`` and save it as ``dump.xml``. -2. Run ``make html`` or ``make dist`` - -You can also run ``summarize.py`` to see which parts of the Numpy -namespace are documented. - - -TODO ----- - -* Numberless [*] footnotes cause LaTeX errors. - -* ``See also`` sections are still somehow broken even if some work. - The problem is that Sphinx searches like this:: - - 'name' - 'active_module.name' - 'active_module.active_class.name'. - - Whereas, we would like to have this: - - 'name' - 'active_module.name' - 'parent_of_active_module.name' - 'parent_of_parent_of_active_module.name' - ... 
- 'numpy.name' - - We can get one step upwards by always using 'numpy' as the active module. - It seems difficult to beat Sphinx to do what we want. - Do we need to change our docstring standard slightly, ie. allow only - leaving the 'numpy.' prefix away? - -* Link resolution doesn't work as intended... eg. `doc.ufunc`_ diff --git a/doc/TESTS.txt b/doc/TESTS.txt index 00d89d21e..40524b82a 100644 --- a/doc/TESTS.txt +++ b/doc/TESTS.txt @@ -8,100 +8,140 @@ NumPy/SciPy Testing Guidelines Introduction '''''''''''' -SciPy uses the `Nose testing system <http://www.somethingaboutorange.com/mrl/projects/nose>`__, with some minor convenience features added. Nose is an extension of the unit testing framework offered by `unittest.py <http://docs.python.org/lib/module-unittest.html>`__. Our goal is that every module and package in SciPy should have a thorough set of unit tests. These tests should exercise the full functionality of a given routine as well as its robustness to erroneous or unexpected input arguments. Long experience has shown that by far the best time to write the tests is before you write or change the code - this is `test-driven development <http://en.wikipedia.org/wiki/Test-driven_development>`__. The arguments for this can sound rather abstract, but we can assure you that you will find that writing the tests first leads to more robust and better designed code. Well-designed tests with good coverage make an enormous difference to the ease of refactoring. Whenever a new bug is found in a routine, you should write a new test for that specific case and add it to the test suite to prevent that bug from creeping back in unnoticed. +SciPy uses the `Nose testing system +<http://www.somethingaboutorange.com/mrl/projects/nose>`__, with some +minor convenience features added. Nose is an extension of the unit +testing framework offered by `unittest.py +<http://docs.python.org/lib/module-unittest.html>`__. Our goal is that +every module and package in SciPy should have a thorough set of unit +tests. These tests should exercise the full functionality of a given +routine as well as its robustness to erroneous or unexpected input +arguments. Long experience has shown that by far the best time to +write the tests is before you write or change the code - this is +`test-driven development +<http://en.wikipedia.org/wiki/Test-driven_development>`__. The +arguments for this can sound rather abstract, but we can assure you +that you will find that writing the tests first leads to more robust +and better designed code. Well-designed tests with good coverage make +an enormous difference to the ease of refactoring. Whenever a new bug +is found in a routine, you should write a new test for that specific +case and add it to the test suite to prevent that bug from creeping +back in unnoticed. To run SciPy's full test suite, use the following:: >>> import scipy >>> scipy.test() -SciPy uses the testing framework from NumPy (specifically ``numpy.testing``), so all the SciPy examples shown here are also applicable to NumPy. So NumPy's full test suite can be run as follows:: +SciPy uses the testing framework from NumPy (specifically +``numpy.testing``), so all the SciPy examples shown here are also +applicable to NumPy. So NumPy's full test suite can be run as +follows:: >>> import numpy >>> numpy.test() -The test method may take two or more arguments; the first is a string label specifying what should be tested and the second is an integer giving the level of output verbosity. 
See the docstring for numpy.test for details. The default value for the label is 'fast' - which will run the standard tests. The string 'full' will run the full battery of tests, including those identified as being slow to run. If the verbosity is 1 or less, the tests will just show information messages about the tests that are run; but if it is greater than 1, then the tests will also provide warnings on missing tests. So if you want to run every test and get messages about which modules don't have tests:: +The test method may take two or more arguments; the first is a string +label specifying what should be tested and the second is an integer +giving the level of output verbosity. See the docstring for numpy.test +for details. The default value for the label is 'fast' - which will +run the standard tests. The string 'full' will run the full battery +of tests, including those identified as being slow to run. If the +verbosity is 1 or less, the tests will just show information messages +about the tests that are run; but if it is greater than 1, then the +tests will also provide warnings on missing tests. So if you want to +run every test and get messages about which modules don't have tests:: - >>> scipy.test(label='full', verbosity=2) # or - >>> scipy.test('full', 2) + >>> scipy.test(label='full', verbosity=2) # or scipy.test('full', 2) -Finally, if you are only interested in testing a subset of SciPy, for example, the ``integrate`` module, use the following:: +Finally, if you are only interested in testing a subset of SciPy, for +example, the ``integrate`` module, use the following:: >>> scipy.integrate.test() -The rest of this page will give you a basic idea of how to add unit tests to modules in SciPy. It is extremely important for us to have extensive unit testing since this code is going to be used by scientists and researchers and is being developed by a large number of people spread across the world. So, if you are writing a package that you'd like to become part of SciPy, please write the tests as you develop the package. Also since much of SciPy is legacy code that was originally written without unit tests, there are still several modules that don't have tests yet. Please feel free to choose one of these modules to develop test for either after or even as you read through this introduction. +The rest of this page will give you a basic idea of how to add unit +tests to modules in SciPy. It is extremely important for us to have +extensive unit testing since this code is going to be used by +scientists and researchers and is being developed by a large number of +people spread across the world. So, if you are writing a package that +you'd like to become part of SciPy, please write the tests as you +develop the package. Also since much of SciPy is legacy code that was +originally written without unit tests, there are still several modules +that don't have tests yet. Please feel free to choose one of these +modules to develop test for either after or even as you read through +this introduction. Writing your own tests '''''''''''''''''''''' -Every Python module, extension module, or subpackage in the SciPy package directory should have a corresponding ``test_<name>.py`` file. The nose framework picks up tests by first looking for any functions in the file that have test-related names (see below), or classes that inherit from ``unittest.TestCase`` (which is also made available as ``numpy.testing.TestCase``. 
Any methods of these classes, that also have test-related names, are considered tests. A test-related name is simply a function or method name containing 'test'. +Every Python module, extension module, or subpackage in the SciPy +package directory should have a corresponding ``test_<name>.py`` file. +Nose examines these files for test methods (named test*) and test +classes (named Test*). -Suppose you have a SciPy module ``scipy/xxx/yyy.py`` containing a function ``zzz()``. To test this you would start by creating a test module called ``test_yyy.py``. There are several different ways to implement tests using the nose / SciPy system. There is the standard unittest way and the nose test function way. +Suppose you have a SciPy module ``scipy/xxx/yyy.py`` containing a +function ``zzz()``. To test this function you would create a test +module called ``test_yyy.py``. If you only need to test one aspect of +``zzz``, you can simply add a test function:: -Standard unit test classes --------------------------- + def test_zzz(): + assert zzz() == 'Hello from zzz' -You can use the traditional unittest system by making your test file include a class that tests ``zzz()``. The test class inherits from the TestCase class, and has test methods that test various aspects of ``zzz()``. Within these test methods, ``assert()`` is used to test whether some case is true. If the assert fails, the test fails. The line ``nose.run(...)`` function actually runs the test suite. A minimal example of a ``test_yyy.py`` file that implements tests for a Scipy package module ``scipy.xxx.yyy``, is shown below:: +More often, we need to group a number of tests together, so we create +a test class:: from numpy.testing import * # import xxx symbols from scipy.xxx.yyy import zzz - class test_zzz(TestCase): + class TestZzz: def test_simple(self): - assert zzz()=='Hello from zzz' - #... + assert zzz() == 'Hello from zzz' - if __name__ == "__main__": - run_module_suite() - - -Note that all classes that are inherited from ``TestCase`` class, are picked up by the test runner. For more detailed information on defining test classes see the official documentation for the `Python Unit testing framework <http://docs.python.org/lib/module-unittest.html>`__. - -Using test functions with nose ------------------------------- + def test_invalid_parameter(self): + assert_raises(...) -This is as simple as making a function or functions with names including 'test':: +Within these test methods, ``assert()`` is used to test whether a +certain assumption is valid. If the assert fails, the test fails. - from numpy.testing import * - - # import xxx symbols - from scipy.xxx.yyy import zzz - - def test_simple(self): - assert zzz()=='Hello from zzz' +Sometimes it is convenient to run ``test_yyy.py`` by itself, so we add +:: if __name__ == "__main__": run_module_suite() - -You can mix nose test functions and TestCase classes in a single test file. +at the bottom. Labeling tests with nose ------------------------ -Unlabeled tests like the ones above are run in the default ``scipy.test()`` run. If you want to label your test as slow - and therefore reserved for a full ``scipy.test(label='full')`` run, you can label it with a nose decorator:: +Unlabeled tests like the ones above are run in the default +``scipy.test()`` run. 
If you want to label your test as slow - and +therefore reserved for a full ``scipy.test(label='full')`` run, you +can label it with a nose decorator:: # numpy.testing module includes 'import decorators as dec' from numpy.testing import * + @dec.slow def test_big(self): print 'Big, slow test' Similarly for methods:: - class test_zzz(TestCase): + class test_zzz: @dec.slow def test_simple(self): - assert zzz()=='Hello from zzz' + assert zzz() == 'Hello from zzz' Easier setup and teardown functions / methods --------------------------------------------- -Nose looks for module level setup and teardown functions by name; thus:: +Nose looks for module level setup and teardown functions by name; +thus:: def setup(): """Module-level setup""" @@ -112,7 +152,8 @@ Nose looks for module level setup and teardown functions by name; thus:: print 'doing teardown' -You can add setup and teardown functions to functions and methods with nose decorators:: +You can add setup and teardown functions to functions and methods with +nose decorators:: import nose from numpy.testing import * @@ -138,7 +179,9 @@ You can add setup and teardown functions to functions and methods with nose deco Parametric tests ---------------- -One very nice feature of nose is allowing easy testing across a range of parameters - a nasty problem for standard unit tests. It does this with test generators:: +One very nice feature of nose is allowing easy testing across a range +of parameters - a nasty problem for standard unit tests. It does this +with test generators:: def check_even(n, nn): """A check function to be used in a test generator.""" @@ -148,31 +191,52 @@ One very nice feature of nose is allowing easy testing across a range of paramet for i in range(0,4,2): yield check_even, i, i*3 -Note that 'check_even' is not itself a test (no 'test' in the name), but 'test_evens' is a generator that returns a series of tests, using 'check_even', across a range of inputs. Nice. +Note that 'check_even' is not itself a test (no 'test' in the name), +but 'test_evens' is a generator that returns a series of tests, using +'check_even', across a range of inputs. + +.. warning:: + + Parametric tests cannot be implemented on classes derived from + TestCase. Doctests -------- -Doctests are a convenient way of documenting the behavior a function and allowing that behavior to be tested at the same time. The output of an interactive Python session can be included in the docstring of a function, and the test framework can run the example and compare the actual output to the expected output. +Doctests are a convenient way of documenting the behavior a function +and allowing that behavior to be tested at the same time. The output +of an interactive Python session can be included in the docstring of a +function, and the test framework can run the example and compare the +actual output to the expected output. -The doctests can be run by adding the ``doctests`` argument to the ``test()`` call; for example, to run all tests (including doctests) for numpy.lib:: +The doctests can be run by adding the ``doctests`` argument to the +``test()`` call; for example, to run all tests (including doctests) +for numpy.lib:: >>> import numpy as np >>> np.lib.test(doctests=True) -The doctests are run as if they are in a fresh Python instance which has executed ``import numpy as np`` (tests that are part of the SciPy package also have an implicit ``import scipy as sp``). 
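(A minimal runnable sketch of the doctest format described here; the
function ``cube`` is hypothetical, and the standard library ``doctest``
module stands in for the ``numpy.testing`` machinery)::

    import doctest

    def cube(x):
        """Return the cube of x.

        Examples
        --------
        >>> cube(3)
        27
        """
        return x ** 3

    if __name__ == "__main__":
        # Collects every interactive example in this module's docstrings
        # and compares the actual output with the expected output shown.
        doctest.testmod()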
+The doctests are run as if they are in a fresh Python instance which
+has executed ``import numpy as np`` (tests that are part of the SciPy
+package also have an implicit ``import scipy as sp``).
 
 ``tests/``
 ----------
 
-Rather than keeping the code and the tests in the same directory, we put all the tests for a given subpackage in a ``tests/`` subdirectory. For our example, if it doesn't all ready exist you will need to create a ``tests/`` directory in ``scipy/xxx/``. So the path for ``test_yyy.py`` is ``scipy/xxx/tests/test_yyy.py``.
+Rather than keeping the code and the tests in the same directory, we
+put all the tests for a given subpackage in a ``tests/``
+subdirectory. For our example, if it doesn't already exist you will
+need to create a ``tests/`` directory in ``scipy/xxx/``. So the path
+for ``test_yyy.py`` is ``scipy/xxx/tests/test_yyy.py``.
 
-Once the ``scipy/xxx/tests/test_yyy.py`` is written, its possible to run the tests by going to the ``tests/`` directory and typing::
+Once the ``scipy/xxx/tests/test_yyy.py`` is written, it's possible to
+run the tests by going to the ``tests/`` directory and typing::
 
    python test_yyy.py
 
-Or if you add ``scipy/xxx/tests/`` to the Python path, you could run the tests interactively in the interpreter like this::
+Or if you add ``scipy/xxx/tests/`` to the Python path, you could run
+the tests interactively in the interpreter like this::
 
    >>> import test_yyy
    >>> test_yyy.test()
@@ -180,14 +244,18 @@ Or if you add ``scipy/xxx/tests/`` to the Python path, you could run the tests i
 
 ``__init__.py`` and ``setup.py``
 --------------------------------
 
-Usually however, adding the ``tests/`` directory to the python path isn't desirable. Instead it would better to invoke the test straight from the module ``xxx``. To this end, simply place the following lines at the end of your package's ``__init__.py`` file::
+Usually, however, adding the ``tests/`` directory to the Python path
+isn't desirable. Instead it would be better to invoke the test straight
+from the module ``xxx``. To this end, simply place the following lines
+at the end of your package's ``__init__.py`` file::
 
    ...
    def test(level=1, verbosity=1):
       from numpy.testing import Tester
       return Tester().test(level, verbosity)
 
-You will also need to add the tests directory in the configuration section of your setup.py::
+You will also need to add the tests directory in the configuration
+section of your setup.py::
 
    ...
    def configuration(parent_package='', top_path=None):
@@ -201,10 +269,11 @@ Now you can do the following to test your module::
 
    >>> import scipy
    >>> scipy.xxx.test()
 
-Also, when invoking the entire SciPy test suite, your tests will be found and run::
+Also, when invoking the entire SciPy test suite, your tests will be
+found and run::
 
    >>> import scipy
-   >>> scipy.test()
+   >>> scipy.test()  # your tests are included and run automatically!
 
 
 Tips & Tricks
 '''''''''''''
 
 Creating many similar tests
 ---------------------------
 
-If you have a collection of tests that must be run multiple times with minor variations, it can be helpful to create a base class containing all the common tests, and then create a subclass for each variation. 
Several examples of this technique exist in NumPy; below are excerpts from one in `numpy/linalg/tests/test_linalg.py <http://svn.scipy.org/svn/numpy/trunk/numpy/linalg/tests/test_linalg.py>`__:: +If you have a collection of tests that must be run multiple times with +minor variations, it can be helpful to create a base class containing +all the common tests, and then create a subclass for each variation. +Several examples of this technique exist in NumPy; below are excerpts +from one in `numpy/linalg/tests/test_linalg.py +<http://svn.scipy.org/svn/numpy/trunk/numpy/linalg/tests/test_linalg.py>`__:: class LinalgTestCase: def test_single(self): @@ -228,24 +302,30 @@ If you have a collection of tests that must be run multiple times with minor var ... - class TestSolve(LinalgTestCase, TestCase): + class TestSolve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) assert_almost_equal(b, dot(a, x)) assert imply(isinstance(b, matrix), isinstance(x, matrix)) - class TestInv(LinalgTestCase, TestCase): + class TestInv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) assert_almost_equal(dot(a, a_inv), identity(asarray(a).shape[0])) assert imply(isinstance(a, matrix), isinstance(a_inv, matrix)) -In this case, we wanted to test solving a linear algebra problem using matrices of several data types, using ``linalg.solve`` and ``linalg.inv``. The common test cases (for single-precision, double-precision, etc. matrices) are collected in ``LinalgTestCase``. Note that ``LinalgTestCase`` is not descended from ``TestCase``--if it were, then nose would attempt to run ``LinalgTestCase.test_single`` and ``LinalgTestCase.test_double``, which would fail because ``LinalgTestCase`` has no ``do`` method. Since ``TestSolve`` and ``TestInv`` inherit from ``LinalgTestCase`` and ``TestCase``, nose will run ``test_single`` and ``test_double`` for each class. +In this case, we wanted to test solving a linear algebra problem using +matrices of several data types, using ``linalg.solve`` and +``linalg.inv``. The common test cases (for single-precision, +double-precision, etc. matrices) are collected in ``LinalgTestCase``. Known failures & skipping tests ------------------------------- -Sometimes you might want to skip a test or mark it as a known failure, such as when the test suite is being written before the code it's meant to test, or if a test only fails on a particular architecture. The decorators from numpy.testing.dec can be used to do this. +Sometimes you might want to skip a test or mark it as a known failure, +such as when the test suite is being written before the code it's +meant to test, or if a test only fails on a particular architecture. +The decorators from numpy.testing.dec can be used to do this. To skip a test, simply use ``skipif``:: @@ -255,7 +335,10 @@ To skip a test, simply use ``skipif``:: def test_something(foo): ... -The test is marked as skipped if ``SkipMyTest`` evaluates to nonzero, and the message in verbose test output is the second argument given to ``skipif``. Similarly, a test can be marked as a known failure by using ``knownfailureif``:: +The test is marked as skipped if ``SkipMyTest`` evaluates to nonzero, +and the message in verbose test output is the second argument given to +``skipif``. Similarly, a test can be marked as a known failure by +using ``knownfailureif``:: from numpy.testing import * @@ -263,8 +346,12 @@ The test is marked as skipped if ``SkipMyTest`` evaluates to nonzero, and the me def test_something_else(foo): ... 
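(A hedged sketch of how these decorators are typically applied; the
platform condition and the test body are illustrative only, not taken
from the NumPy test suite)::

    import sys
    from numpy.testing import dec

    # Skipped wherever the condition is true; the message is what shows
    # up in verbose test output.
    @dec.skipif(sys.platform == 'win32', "POSIX-only behavior")
    def test_posix_only():
        assert 1 + 1 == 2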
-Of course, a test can be unconditionally skipped or marked as a known failure by passing ``True`` as the first argument to ``skipif`` or ``knownfailureif``, respectively. - -A total of the number of skipped and known failing tests is displayed at the end of the test run. Skipped tests are marked as ``'S'`` in the test results (or ``'SKIPPED'`` for ``verbosity > 1``), and known failing tests are marked as ``'K'`` (or ``'KNOWN'`` if ``verbosity > 1``). - +Of course, a test can be unconditionally skipped or marked as a known +failure by passing ``True`` as the first argument to ``skipif`` or +``knownfailureif``, respectively. +A total of the number of skipped and known failing tests is displayed +at the end of the test run. Skipped tests are marked as ``'S'`` in +the test results (or ``'SKIPPED'`` for ``verbosity > 1``), and known +failing tests are marked as ``'K'`` (or ``'KNOWN'`` if ``verbosity > +1``). diff --git a/doc/example.py b/doc/example.py index 152e2a622..0d5b53a33 100644 --- a/doc/example.py +++ b/doc/example.py @@ -35,7 +35,7 @@ import matplotlib.pyplot as plt from my_module import my_func, other_func def foo(var1, var2, long_var_name='hi') : - """A one-line summary that does not use variable names or the + r"""A one-line summary that does not use variable names or the function name. Several sentences providing an extended description. Refer to @@ -58,11 +58,11 @@ def foo(var1, var2, long_var_name='hi') : ------- describe : type Explanation - output + output : type Explanation - tuple + tuple : type Explanation - items + items : type even more explaining Other Parameters @@ -117,7 +117,6 @@ def foo(var1, var2, long_var_name='hi') : [4, 5, 6] >>> print "a\n\nb" a - <BLANKLINE> b """ diff --git a/doc/neps/datetime-proposal3.rst b/doc/neps/datetime-proposal3.rst index aad549a88..6874aac13 100644 --- a/doc/neps/datetime-proposal3.rst +++ b/doc/neps/datetime-proposal3.rst @@ -77,6 +77,7 @@ corresponding time spans. s second [ 2.9e9 BC, 2.9e9 AC] ms millisecond [ 2.9e6 BC, 2.9e6 AC] us microsecond [290301 BC, 294241 AC] + c# ticks (100ns) [ 2757 BC, 31197 AC] ns nanosecond [ 1678 AC, 2262 AC] ======== ================ ========================== @@ -97,10 +98,9 @@ Using the long string notation:: Using the short string notation:: - dtype('T8[us]') + dtype('M8[us]') -Note that a time unit should always be specified, as there is not a -default. +The default is microseconds if no time unit is specified. Thus, 'M8' is equivalent to 'M8[us]' Setting and getting values @@ -108,7 +108,7 @@ Setting and getting values The objects with this dtype can be set in a series of ways:: - t = numpy.ones(3, dtype='T8[s]') + t = numpy.ones(3, dtype='M8[s]') t[0] = 1199164176 # assign to July 30th, 2008 at 17:31:00 t[1] = datetime.datetime(2008, 7, 30, 17, 31, 01) # with datetime module t[2] = '2008-07-30T17:31:02' # with ISO 8601 @@ -128,28 +128,28 @@ Comparisons The comparisons will be supported too:: - numpy.array(['1980'], 'T8[Y]') == numpy.array(['1979'], 'T8[Y]') + numpy.array(['1980'], 'M8[Y]') == numpy.array(['1979'], 'M8[Y]') --> [False] or by applying broadcasting:: - numpy.array(['1979', '1980'], 'T8[Y]') == numpy.datetime64('1980', 'Y') + numpy.array(['1979', '1980'], 'M8[Y]') == numpy.datetime64('1980', 'Y') --> [False, True] The next should work too:: - numpy.array(['1979', '1980'], 'T8[Y]') == '1980-01-01' + numpy.array(['1979', '1980'], 'M8[Y]') == '1980-01-01' --> [False, True] because the right hand expression can be broadcasted into an array of 2 -elements of dtype 'T8[Y]'. 
+elements of dtype 'M8[Y]'. Compatibility issues ~~~~~~~~~~~~~~~~~~~~ This will be fully compatible with the ``datetime`` class of the ``datetime`` module of Python only when using a time unit of -microseconds. For other time units, the conversion process will loose +microseconds. For other time units, the conversion process will lose precision or will overflow as needed. The conversion from/to a ``datetime`` object doesn't take leap seconds into account. @@ -187,6 +187,7 @@ corresponding time spans. s second +- 2.9e12 years ms millisecond +- 2.9e9 years us microsecond +- 2.9e6 years + c# ticks (100ns) +- 2.9e4 years ns nanosecond +- 292 years ps picosecond +- 106 days fs femtosecond +- 2.6 hours @@ -207,17 +208,17 @@ Using the long string notation:: Using the short string notation:: - dtype('t8[us]') + dtype('m8[us]') + +The default is micro-seconds if no default is specified: 'm8' is equivalent to 'm8[us]' -Note that a time unit should always be specified, as there is not a -default. Setting and getting values ~~~~~~~~~~~~~~~~~~~~~~~~~~ The objects with this dtype can be set in a series of ways:: - t = numpy.ones(3, dtype='t8[ms]') + t = numpy.ones(3, dtype='m8[ms]') t[0] = 12 # assign to 12 ms t[1] = datetime.timedelta(0, 0, 13000) # 13 ms t[2] = '0:00:00.014' # 14 ms @@ -236,21 +237,21 @@ Comparisons The comparisons will be supported too:: - numpy.array([12, 13, 14], 't8[ms]') == numpy.array([12, 13, 13], 't8[ms]') + numpy.array([12, 13, 14], 'm8[ms]') == numpy.array([12, 13, 13], 'm8[ms]') --> [True, True, False] or by applying broadcasting:: - numpy.array([12, 13, 14], 't8[ms]') == numpy.timedelta64(13, 'ms') + numpy.array([12, 13, 14], 'm8[ms]') == numpy.timedelta64(13, 'ms') --> [False, True, False] The next should work too:: - numpy.array([12, 13, 14], 't8[ms]') == '0:00:00.012' + numpy.array([12, 13, 14], 'm8[ms]') == '0:00:00.012' --> [True, False, False] because the right hand expression can be broadcasted into an array of 3 -elements of dtype 't8[ms]'. +elements of dtype 'm8[ms]'. Compatibility issues ~~~~~~~~~~~~~~~~~~~~ @@ -340,12 +341,12 @@ Operating with date/time arrays The only arithmetic operation allowed between absolute dates is the subtraction:: - In [10]: numpy.ones(3, "T8[s]") - numpy.zeros(3, "T8[s]") + In [10]: numpy.ones(3, "M8[s]") - numpy.zeros(3, "M8[s]") Out[10]: array([1, 1, 1], dtype=timedelta64[s]) But not other operations:: - In [11]: numpy.ones(3, "T8[s]") + numpy.zeros(3, "T8[s]") + In [11]: numpy.ones(3, "M8[s]") + numpy.zeros(3, "M8[s]") TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'numpy.ndarray' Comparisons between absolute dates are allowed. @@ -360,12 +361,12 @@ time units can be very different, and it is not clear at all what time unit will be preferred for the user. For example, this should be allowed:: - >>> numpy.ones(3, dtype="T8[Y]") - numpy.zeros(3, dtype="T8[Y]") + >>> numpy.ones(3, dtype="M8[Y]") - numpy.zeros(3, dtype="M8[Y]") array([1, 1, 1], dtype="timedelta64[Y]") But the next should not:: - >>> numpy.ones(3, dtype="T8[Y]") - numpy.zeros(3, dtype="T8[ns]") + >>> numpy.ones(3, dtype="M8[Y]") - numpy.zeros(3, dtype="M8[ns]") raise numpy.IncompatibleUnitError # what unit to choose? 
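(If the proposal were implemented as written, mixing the two dtypes
would look roughly like the following sketch; it follows the NEP text
above, not a released NumPy)::

    import numpy

    dates = numpy.zeros(3, dtype='M8[D]')      # absolute dates, day resolution
    week  = numpy.ones(3, dtype='m8[D]') * 7   # relative times: seven days

    later = dates + week    # datetime64 + timedelta64 -> datetime64
    span  = later - dates   # datetime64 - datetime64  -> timedelta64, the only
                            # arithmetic allowed between absolute dates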
@@ -375,15 +376,15 @@ But the next should not:: It will be possible to add and subtract relative times from absolute dates:: - In [10]: numpy.zeros(5, "T8[Y]") + numpy.ones(5, "t8[Y]") + In [10]: numpy.zeros(5, "M8[Y]") + numpy.ones(5, "m8[Y]") Out[10]: array([1971, 1971, 1971, 1971, 1971], dtype=datetime64[Y]) - In [11]: numpy.ones(5, "T8[Y]") - 2 * numpy.ones(5, "t8[Y]") + In [11]: numpy.ones(5, "M8[Y]") - 2 * numpy.ones(5, "m8[Y]") Out[11]: array([1969, 1969, 1969, 1969, 1969], dtype=datetime64[Y]) But not other operations:: - In [12]: numpy.ones(5, "T8[Y]") * numpy.ones(5, "t8[Y]") + In [12]: numpy.ones(5, "M8[Y]") * numpy.ones(5, "m8[Y]") TypeError: unsupported operand type(s) for *: 'numpy.ndarray' and 'numpy.ndarray' Casting rules @@ -408,15 +409,15 @@ Finally, it will be possible to operate with relative times as if they were regular int64 dtypes *as long as* the result can be converted back into a ``timedelta64``:: - In [10]: numpy.ones(3, 't8[us]') + In [10]: numpy.ones(3, 'm8[us]') Out[10]: array([1, 1, 1], dtype="timedelta64[us]") - In [11]: (numpy.ones(3, 't8[M]') + 2) ** 3 + In [11]: (numpy.ones(3, 'm8[M]') + 2) ** 3 Out[11]: array([27, 27, 27], dtype="timedelta64[M]") But:: - In [12]: numpy.ones(5, 't8') + 1j + In [12]: numpy.ones(5, 'm8') + 1j TypeError: the result cannot be converted into a ``timedelta64`` Casting rules @@ -426,14 +427,14 @@ When combining two ``timedelta64`` dtypes with different time units the outcome will be the shorter of both ("keep the precision" rule). For example:: - In [10]: numpy.ones(3, 't8[s]') + numpy.ones(3, 't8[m]') + In [10]: numpy.ones(3, 'm8[s]') + numpy.ones(3, 'm8[m]') Out[10]: array([61, 61, 61], dtype="timedelta64[s]") However, due to the impossibility to know the exact duration of a relative year or a relative month, when these time units appear in one of the operands, the operation will not be allowed:: - In [11]: numpy.ones(3, 't8[Y]') + numpy.ones(3, 't8[D]') + In [11]: numpy.ones(3, 'm8[Y]') + numpy.ones(3, 'm8[D]') raise numpy.IncompatibleUnitError # how to convert relative years to days? In order to being able to perform the above operation a new NumPy @@ -451,11 +452,11 @@ days). With this, the above operation can be done as follows:: - In [10]: t_years = numpy.ones(3, 't8[Y]') + In [10]: t_years = numpy.ones(3, 'm8[Y]') In [11]: t_days = numpy.change_timeunit(t_years, 'D', '2001-01-01') - In [12]: t_days + numpy.ones(3, 't8[D]') + In [12]: t_days + numpy.ones(3, 'm8[D]') Out[12]: array([366, 366, 366], dtype="timedelta64[D]") diff --git a/doc/numpy.scipy.org/Makefile b/doc/numpy.scipy.org/Makefile new file mode 100644 index 000000000..93933ba25 --- /dev/null +++ b/doc/numpy.scipy.org/Makefile @@ -0,0 +1,105 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf build/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html + @echo + @echo "Build finished. The HTML pages are in build/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml + @echo + @echo "Build finished. The HTML pages are in build/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in build/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in build/qthelp, like this:" + @echo "# qcollectiongenerator build/qthelp/Numpy.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile build/qthelp/Numpy.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) build/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/Numpy" + @echo "# ln -s build/devhelp $$HOME/.local/share/devhelp/Numpy" + @echo "# devhelp" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex + @echo + @echo "Build finished; the LaTeX files are in build/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +latexpdf: latex + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex + @echo "Running LaTeX files through pdflatex..." + make -C build/latex all-pdf + @echo "pdflatex finished; the PDF files are in build/latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes + @echo + @echo "The overview file is in build/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in build/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in build/doctest/output.txt." 
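(The targets above shell out to ``sphinx-build``; the same build can be
driven from Python through Sphinx's application API, as in this sketch
with illustrative paths)::

    from sphinx.application import Sphinx

    # Equivalent to: sphinx-build -b html -d build/doctrees source build/html
    app = Sphinx(srcdir='source', confdir='source', outdir='build/html',
                 doctreedir='build/doctrees', buildername='html')
    app.build()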
diff --git a/doc/numpy.scipy.org/source/conf.py b/doc/numpy.scipy.org/source/conf.py new file mode 100644 index 000000000..16bd69f83 --- /dev/null +++ b/doc/numpy.scipy.org/source/conf.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +# +# Scipy.org documentation build configuration file +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.pngmath', + 'sphinx.ext.ifconfig'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Numpy' +copyright = u'2009 Numpy developers' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '' +# The full version, including alpha/beta/rc tags. +release = '' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'scipy' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ["../theme"] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +html_title = "Numpy" + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = "" + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = False + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Scipy.org' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'numpy-scipy-org.tex', u'numpy.scipy.org', + u'NumPy collaborators', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + 'http://docs.python.org/': None, + 'http://docs.scipy.org/doc/numpy/': None, +} diff --git a/doc/numpy.scipy.org/source/index.rst b/doc/numpy.scipy.org/source/index.rst new file mode 100644 index 000000000..ee2ac48fd --- /dev/null +++ b/doc/numpy.scipy.org/source/index.rst @@ -0,0 +1,32 @@ +NumPy is the fundamental package needed for scientific computing with Python. It contains among other things: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities. + +Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases. + +Getting Started +--------------- + +- `Getting Numpy <http://www.scipy.org/Download>`_ +- `Installing NumPy and SciPy <http://www.scipy.org/Installing_SciPy>`_ +- `NumPy and SciPy documentation page <http://docs.scipy.org/doc/>`_ +- `NumPy Tutorial <http://www.scipy.org/Tentative_NumPy_Tutorial>`_ +- `NumPy for MATLAB© Users <http://www.scipy.org/NumPy_for_Matlab_Users>`_ +- `NumPy functions by category <http://www.scipy.org/Numpy_Functions_by_Category>`_ +- `NumPy Mailing List <http://www.scipy.org/Mailing_Lists>`_ + +More Information +---------------- + +- `NumPy Sourceforge Home Page <http://sourceforge.net/projects/numpy/>`_ +- `SciPy Home Page <http://www.scipy.org/>`_ +- `Interfacing with compiled code <http://www.scipy.org/Topical_Software#head-7153b42ac4ea517c7d99ec4f4453555b2302a1f8>`_ +- :doc:`Older python array packages </old_array_packages>` + +.. toctree:: + :hidden: + + old_array_packages diff --git a/doc/numpy.scipy.org/source/old_array_packages.rst b/doc/numpy.scipy.org/source/old_array_packages.rst new file mode 100644 index 000000000..814083ed6 --- /dev/null +++ b/doc/numpy.scipy.org/source/old_array_packages.rst @@ -0,0 +1,40 @@ +==================== +Older Array Packages +==================== + +It may take months for the large code base that uses Numeric and/or Numarray +to transition to the new NumPy system. Links to the older packages are +provided here. New users should start out with NumPy. + +Much of the documentation for Numeric and Numarray is applicable to the NumPy package. However, there are `significant feature improvements <http://numpy.scipy.org/new_features.html>`_. A complete guide to the new system has been written by the primary developer, Travis Oliphant. It is now in the public domain. Other Documentation is available at `the scipy website <http://www.scipy.org/>`_ and in the docstrings (which can be extracted using pydoc). Free Documentation for Numeric (most of which is still valid) is `here <http://numpy.scipy.org/numpydoc/numdoc.htm>`_ or as a `pdf <http://numpy.scipy.org/numpy.pdf>`_ file. Obviously you should replace references to Numeric in that document with numpy (i.e. instead of "import Numeric", use "import numpy"). + +Upgrading from historical implementations +========================================= + +NumPy derives from the old Numeric code base and can be used as a replacement for Numeric. It also adds the features introduced by Numarray and can also be used to replace Numarray. + +Numeric users should find the transition relatively easy (although not without some effort). 
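+For many programs, the mechanical part of the port is just the import change.
+A minimal, hypothetical sketch (real code usually needs further review)::
+
+    # old code written against the Numeric package:
+    #   import Numeric
+    #   a = Numeric.zeros((3, 3), 'd')
+
+    # the same code written against NumPy:
+    import numpy
+    a = numpy.zeros((3, 3), 'd')
+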
+There is a module (numpy.oldnumeric.alter_code1) that can make most of the
+necessary changes to your Python code that used Numeric to work with NumPy's
+Numeric compatibility module.
+
+Users of numarray can also transition their code using a similar module
+(numpy.numarray.alter_code1) and the numpy.numarray compatibility layer.
+
+C-code written to either package can be easily ported to NumPy using
+"numpy/oldnumeric.h" and "numpy/libnumarray.h" for the Numeric C-API and the
+Numarray C-API respectively. `Sourceforge download site
+<http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103>`_
+
+For about 6 months at the end of 2005, the new package was called SciPy Core
+(not to be confused with the full SciPy package, which remains a `separate
+<http://www.scipy.org/>`_ package), and so you will occasionally see references
+to SciPy Core floating around. It was decided in January 2006 to go with the
+historical name of NumPy for the new package. Realize that NumPy (module name
+numpy) is the new name. Because of the name change, many discussions took
+place on scipy-dev@scipy.org and scipy-user@scipy.org. If you have a question
+about the new system, you may wish to run a search on those mailing lists as
+well as the main NumPy list (numpy-discussion@lists.sourceforge.net).
+
+Numeric (version 24.2)
+======================
+
+Numeric was the first array object built for Python. It has been quite
+successful and is used in a wide variety of settings and applications.
+Maintenance has ceased for Numeric, and users should transition to NumPy as
+quickly as possible. There is a module called numpy.oldnumeric.alter_code1 in
+NumPy that can make the transition to NumPy easier (it will automatically
+perform the search-and-replace style changes that need to be made to Python
+code that uses Numeric to make it work with NumPy).
+
+Documentation for Numeric is at http://numpy.scipy.org/numpydoc/numdoc.htm
+or as a `pdf <http://numpy.scipy.org/numpy.pdf>`_ file.
+`Sourceforge Numeric Download Page
+<http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=1351>`_
+
+Numarray
+========
+
+Numarray is another implementation of an array object for Python, written
+after Numeric and before NumPy. Sponsors of numarray have indicated they will
+be moving to NumPy as soon as is feasible for them, so that eventually
+numarray will be phased out (probably sometime in 2007). This project shares
+some of the resources with the Numeric sourceforge site but maintains its own
+web page at http://www.stsci.edu/resources/software_hardware/numarray
+`Sourceforge Numarray Download Page
+<http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=32367>`_
\ No newline at end of file diff --git a/doc/numpy.scipy.org/theme/scipy/layout.html b/doc/numpy.scipy.org/theme/scipy/layout.html new file mode 100644 index 000000000..927088ce5 --- /dev/null +++ b/doc/numpy.scipy.org/theme/scipy/layout.html @@ -0,0 +1,16 @@ +{% extends "sphinxdoc/layout.html" %} + +{% block rootrellink %} +{% endblock %} + +{% block relbar1 %} +<div class="top-logo-header"> +<a href="{{ pathto('index') }}"><img src="{{ pathto("_static/numpy_logo.png", 1) }}" border="0" alt="NumPy Homepage"/></a> +</div> +{% endblock %} + +{% block relbar2 %}{% endblock %} + +{# put the sidebar before the body #} +{% block sidebar1 %}{% endblock %} +{% block sidebar2 %}{% endblock %} diff --git a/doc/numpy.scipy.org/theme/scipy/static/logo.gif b/doc/numpy.scipy.org/theme/scipy/static/logo.gif Binary files differnew file mode 100644 index 000000000..ab2e3ac33 --- /dev/null +++ b/doc/numpy.scipy.org/theme/scipy/static/logo.gif diff --git a/doc/numpy.scipy.org/theme/scipy/static/numpy_logo.png b/doc/numpy.scipy.org/theme/scipy/static/numpy_logo.png Binary files differnew file mode 100644 index 000000000..5ed48c094 --- /dev/null +++ b/doc/numpy.scipy.org/theme/scipy/static/numpy_logo.png diff --git a/doc/numpy.scipy.org/theme/scipy/static/scipy.css b/doc/numpy.scipy.org/theme/scipy/static/scipy.css new file mode 100644 index 000000000..377c45955 --- /dev/null +++ b/doc/numpy.scipy.org/theme/scipy/static/scipy.css @@ -0,0 +1,200 @@ +@import "sphinxdoc.css"; + +/** + * Spacing fixes + */ + + +div.body { + width: 90%; +} + +div.bodywrapper { + width: 100%; +} + +div.body p, div.body dd, div.body li { + line-height: 125%; +} + +ul.simple { + margin-top: 0; + margin-bottom: 0; + padding-top: 0; + padding-bottom: 0; +} + +/* spacing around blockquoted fields in parameters/attributes/returns */ +td.field-body > blockquote { + margin-top: 0.1em; + margin-bottom: 0.5em; +} + +/* spacing around example code */ +div.highlight > pre { + padding: 2px 5px 2px 5px; +} + +/* spacing in see also definition lists */ +dl.last > dd { + margin-top: 1px; + margin-bottom: 5px; + margin-left: 30px; +} + +/** + * Hide dummy toctrees + */ + +ul { + padding-top: 0; + padding-bottom: 0; + margin-top: 0; + margin-bottom: 0; +} +ul li { + padding-top: 0; + padding-bottom: 0; + margin-top: 0; + margin-bottom: 0; +} +ul li a.reference { + padding-top: 0; + padding-bottom: 0; + margin-top: 0; + margin-bottom: 0; +} + +/** + * Make high-level subsections easier to distinguish from top-level ones + */ +div.body h3 { + background-color: transparent; +} + +div.body h4 { + border: none; + background-color: transparent; +} + +/** + * Scipy colors + */ + +body { + background-color: rgb(100,135,220); + border: none; +} + +div.sphinxsidebar { + display: none; +} + +div.sphinxsidebar h3 { + color: rgb(0,102,204); +} + +div.sphinxsidebar h4 { + color: rgb(0,82,194); +} + +div.sphinxsidebar p { + color: black; +} + +div.sphinxsidebar ul.want-points { + list-style: disc; +} + +.field-list th { + color: rgb(0,102,204); +} + +/** + * Extra admonitions + */ + +div.tip { + background-color: #ffffe4; + border: 1px solid #ee6; +} + +div.plot-output { + clear-after: both; +} + +div.plot-output .figure { + float: left; + text-align: center; + margin-bottom: 0; + padding-bottom: 0; +} + +div.plot-output .caption { + margin-top: 2; + padding-top: 0; +} + +div.plot-output p.admonition-title { + display: none; +} + +div.plot-output:after { + content: ""; + display: block; + height: 0; + clear: both; +} + + +/* +div.admonition-example { + 
+    background-color: #e4ffe4;
+    border: 1px solid #ccc;
+}*/
+
+
+/**
+ * Styling for field lists
+ */
+
+table.field-list th {
+    border-left: 1px solid #aaa !important;
+    padding-left: 5px;
+}
+
+table.field-list {
+    border-collapse: separate;
+    border-spacing: 10px;
+}
+
+/**
+ * Styling for footnotes
+ */
+
+table.footnote td, table.footnote th {
+    border: none;
+}
+
+/**
+ * Top header logo and colors
+ */
+body div.top-logo-header {
+    text-align: left;
+    background-color: #8CAAE6;
+    border-bottom: 8px solid #003399;
+    border-top: 10px solid #6487dc;
+}
+
+/**
+ * Footer colors
+ */
+body div.footer {
+    background-color: #6487dc;
+    border: none;
+    color: white;
+    text-align: center;
+}
+body div.footer a {
+    color: white;
+}
diff --git a/doc/numpy.scipy.org/theme/scipy/theme.conf b/doc/numpy.scipy.org/theme/scipy/theme.conf
new file mode 100644
index 000000000..e2f5ed848
--- /dev/null
+++ b/doc/numpy.scipy.org/theme/scipy/theme.conf
@@ -0,0 +1,4 @@
+[theme]
+inherit = sphinxdoc
+stylesheet = scipy.css
+pygments_css = friendly
diff --git a/doc/release/1.4.0-notes.rst b/doc/release/1.4.0-notes.rst
new file mode 100644
index 000000000..849b429cb
--- /dev/null
+++ b/doc/release/1.4.0-notes.rst
@@ -0,0 +1,132 @@
+=========================
+NumPy 1.4.0 Release Notes
+=========================
+
+This minor release includes numerous bug fixes as well as a few new features.
+
+Highlights
+==========
+
+New features
+============
+
+Extended array wrapping mechanism for ufuncs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An __array_prepare__ method has been added to ndarray to provide subclasses
+greater flexibility to interact with ufuncs and ufunc-like functions. ndarray
+already provided __array_wrap__, which allowed subclasses to set the array type
+for the result and populate metadata on the way out of the ufunc (as seen in
+the implementation of MaskedArray). For some applications it is necessary to
+provide checks and populate metadata *on the way in*. __array_prepare__ is
+therefore called just after the ufunc has initialized the output array but
+before computing the results and populating it. This way, checks can be made
+and errors raised before operations which may modify data in place.
+
+Automatic detection of forward incompatibilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Previously, if an extension was built against version N of NumPy and used on
+a system with NumPy M < N, the call to import_array was successful, which
+could cause crashes because version M lacks functions that exist in version N.
+Starting from NumPy 1.4.0, this will cause a failure in import_array, so the
+error will be caught early on.
+
+New C API
+~~~~~~~~~
+
+The following C functions have been added to the C API:
+
+ #. PyArray_GetNDArrayCFeatureVersion: return the *API* version of the
+    loaded numpy.
+ #. PyArray_Correlate2 - like PyArray_Correlate, but implements the usual
+    definition of correlation. Inputs are not swapped, and the conjugate is
+    taken for complex arrays.
+ #. PyArray_NeighborhoodIterNew - a new iterator to iterate over a
+    neighborhood of a point, with automatic boundary handling.
+
+New ufuncs
+~~~~~~~~~~
+
+The following ufuncs have been added to the C API:
+
+ #. copysign - return the value of the first argument with the sign copied
+    from the second argument.
+
+New defines
+~~~~~~~~~~~
+
+The Alpha processor is now defined and available in numpy/npy_cpu.h. The
+failed detection of the PARISC processor has been fixed. The defines are:
+
+ #. NPY_CPU_HPPA: PARISC
+ #. NPY_CPU_ALPHA: Alpha
+
+Enhancements
+~~~~~~~~~~~~
+
+ #. The sort functions now sort nans to the end.
+
+    * Real sort order is [R, nan]
+    * Complex sort order is [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+    Complex numbers with the same nan placements are sorted according to
+    the non-nan part if it exists.
+ #. The type comparison functions have been made consistent with the new
+    sort order of nans. Searchsorted now works with sorted arrays
+    containing nan values.
+ #. Complex division has been made more resistant to overflow.
+ #. Complex floor division has been made more resistant to overflow.
+
+Testing
+~~~~~~~
+
+ #. deprecated decorator: this decorator may be used to avoid cluttering
+    the test output when checking that a DeprecationWarning is indeed
+    raised by the decorated test.
+
+
+Deprecations
+============
+
+The following functions are deprecated:
+
+ #. correlate: it takes a new keyword argument old_behavior. When True (the
+    default), it returns the same result as before. When False, it computes
+    the conventional correlation and takes the conjugate for complex arrays.
+    The old behavior raises a DeprecationWarning in 1.4 and will be removed
+    in NumPy 1.5.
+
+New iterators
+~~~~~~~~~~~~~
+
+A new neighborhood iterator has been added to the C API. It can be used to
+iterate over the items in a neighborhood of an array, and can handle boundary
+conditions automatically. Zero and one padding are available, as well as
+arbitrary constant value, mirror and circular padding.
+
+Internal changes
+================
+
+Split multiarray and umath source code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The source code of multiarray and umath has been split into separate logical
+compilation units. This should make the source code more approachable for
+newcomers.
+
+Separate compilation
+~~~~~~~~~~~~~~~~~~~~
+
+By default, every file of multiarray (and umath) is merged into one for
+compilation as was the case before, but if the NPY_SEPARATE_COMPILATION
+environment variable is set to a non-negative value, experimental individual
+compilation of each file is enabled. This makes the compile/debug cycle much
+faster when working on core numpy.
+
+Separate core math library
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+New functions that have been added:
+
+ * npy_copysign
diff --git a/doc/source/_templates/autosummary/class.rst b/doc/source/_templates/autosummary/class.rst
new file mode 100644
index 000000000..0cabe7cd1
--- /dev/null
+++ b/doc/source/_templates/autosummary/class.rst
@@ -0,0 +1,23 @@
+{% extends "!autosummary/class.rst" %}
+
+{% block methods %}
+{% if methods %}
+   .. HACK
+   .. autosummary::
+      :toctree:
+   {% for item in methods %}
+      {{ name }}.{{ item }}
+   {%- endfor %}
+{% endif %}
+{% endblock %}
+
+{% block attributes %}
+{% if attributes %}
+   .. HACK
+   .. autosummary::
+      :toctree:
+   {% for item in attributes %}
+      {{ name }}.{{ item }}
+   {%- endfor %}
+{% endif %}
+{% endblock %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 318011e8a..b610470de 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -18,12 +18,13 @@ sys.path.insert(0, os.path.abspath('../sphinxext'))
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
               'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
-              'only_directives', 'plot_directive']
+              'plot_directive']
 if sphinx.__version__ >= "0.7":
     extensions.append('sphinx.ext.autosummary')
 else:
     extensions.append('autosummary')
+    extensions.append('only_directives')
 
 # Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates'] @@ -44,6 +45,7 @@ copyright = '2008-2009, The Scipy community' import numpy # The short X.Y version (including .devXXXX, rcX, b1 suffixes if present) version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__) +version = re.sub(r'(\.dev\d+).*?$', r'\1', version) # The full version, including alpha/beta/rc tags. release = numpy.__version__ print version, release @@ -250,6 +252,9 @@ np.random.seed(0) plot_include_source = True plot_formats = [('png', 100), 'pdf'] +import math +phi = (math.sqrt(5) + 1)/2 + import matplotlib matplotlib.rcParams.update({ 'font.size': 8, @@ -258,7 +263,7 @@ matplotlib.rcParams.update({ 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'legend.fontsize': 8, - 'figure.figsize': (3.236068, 2), + 'figure.figsize': (3*phi, 3), 'figure.subplot.bottom': 0.2, 'figure.subplot.left': 0.2, 'figure.subplot.right': 0.9, diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 671c95bbf..f9abfbafa 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -47,14 +47,29 @@ customize: update meta-information from the "parent." Subclasses inherit a default implementation of this method that does nothing. -.. function:: __array_wrap__(array) - - This method should return an instance of the subclass from the - :class:`ndarray` object passed in. For example, this is called - after every :ref:`ufunc <ufuncs.output-type>` for the object with - the highest array priority. The ufunc-computed array object is - passed in and whatever is returned is passed to the - user. Subclasses inherit a default implementation of this method. +.. function:: __array_prepare__(array, context=None) + + At the beginning of every :ref:`ufunc <ufuncs.output-type>`, this + method is called on the input object with the highest array + priority, or the output object if one was specified. The output + array is passed in and whatever is returned is passed to the ufunc. + Subclasses inherit a default implementation of this method which + simply returns the output array unmodified. Subclasses may opt to + use this method to transform the output array into an instance of + the subclass and update metadata before returning the array to the + ufunc for computation. + +.. function:: __array_wrap__(array, context=None) + + At the end of every :ref:`ufunc <ufuncs.output-type>`, this method + is called on the input object with the highest array priority, or + the output object if one was specified. The ufunc-computed array + is passed in and whatever is returned is passed to the user. + Subclasses inherit a default implementation of this method, which + transforms the array into a new instance of the object's class. Subclasses + may opt to use this method to transform the output array into an + instance of the subclass and update metadata before returning the + array to the user. .. data:: __array_priority__ diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 0def05ced..1bf7d1ac8 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -9,14 +9,14 @@ The N-dimensional array (:class:`ndarray`) An :class:`ndarray` is a (usually fixed-size) multidimensional container of items of the same type and size. 
The number of dimensions and items in an array is defined by its :attr:`shape <ndarray.shape>`, -which is a :class:`tuple` of *N* integers that specify the sizes of +which is a :class:`tuple` of *N* positive integers that specify the sizes of each dimension. The type of items in the array is specified by a separate :ref:`data-type object (dtype) <arrays.dtypes>`, one of which is associated with each ndarray. -As with other container objects in Python, the contents of a +As with other container objects in Python, the contents of an :class:`ndarray` can be accessed and modified by :ref:`indexing or -slicing <arrays.indexing>` the array (using for example *N* integers), +slicing <arrays.indexing>` the array (using, for example, *N* integers), and via the methods and attributes of the :class:`ndarray`. .. index:: view, base @@ -42,15 +42,19 @@ objects implementing the :class:`buffer` or :ref:`array >>> x.dtype dtype('int32') - The array can be indexed using a Python container-like syntax: + The array can be indexed using Python container-like syntax: - >>> x[1,2] + >>> x[1,2] # i.e., the element of x in the *second* row, *third* column 6 For example :ref:`slicing <arrays.indexing>` can produce views of the array: >>> y = x[:,1] - >>> y[0] = 9 + >>> y + array([2, 5]) + >>> y[0] = 9 # this also changes the corresponding element in x + >>> y + array([9, 5]) >>> x array([[1, 9, 3], [4, 5, 6]]) @@ -95,7 +99,7 @@ the bytes are interpreted is defined by the :ref:`data-type object .. index:: C-order, Fortran-order, row-major, column-major, stride, offset A segment of memory is inherently 1-dimensional, and there are many -different schemes of arranging the items of an *N*-dimensional array to +different schemes for arranging the items of an *N*-dimensional array in a 1-dimensional block. Numpy is flexible, and :class:`ndarray` objects can accommodate any *strided indexing scheme*. In a strided scheme, the N-dimensional index :math:`(n_0, n_1, ..., n_{N-1})` corresponds @@ -105,10 +109,10 @@ to the offset (in bytes) from the beginning of the memory block associated with the array. Here, :math:`s_k` are integers which specify the :obj:`strides -<ndarray.strides>` of the array. The :term:`column-major` order (used -for example in the Fortran language and in *Matlab*) and -:term:`row-major` order (used in C) are special cases of the strided -scheme, and correspond to the strides: +<ndarray.strides>` of the array. The :term:`column-major` order (used, +for example, in the Fortran language and in *Matlab*) and +:term:`row-major` order (used in C) schemes are just specific kinds of +strided scheme, and correspond to the strides: .. math:: @@ -116,12 +120,12 @@ scheme, and correspond to the strides: .. index:: single-segment, contiguous, non-contiguous -Both the C and Fortran orders are :term:`contiguous`, *i.e.* +Both the C and Fortran orders are :term:`contiguous`, *i.e.,* :term:`single-segment`, memory layouts, in which every part of the memory block can be accessed by some combination of the indices. Data in new :class:`ndarrays <ndarray>` is in the :term:`row-major` -(C) order, unless otherwise specified, but for example :ref:`basic +(C) order, unless otherwise specified, but, for example, :ref:`basic array slicing <arrays.indexing>` often produces :term:`views <view>` in a different scheme. @@ -227,7 +231,8 @@ Array methods An :class:`ndarray` object has many methods which operate on or with the array in some fashion, typically returning an array result. These -methods are explained below. 
+methods are briefly explained below. (Each method's doc string has a
+more complete description.)
 
 For the following methods there are also corresponding functions in
 :mod:`numpy`: :func:`all`, :func:`any`, :func:`argmax`,
@@ -433,7 +438,7 @@ Arithmetic:
 
 .. note::
 
   - Any third argument to :func:`pow()` is silently ignored,
-    as the underlying :func:`ufunc <power>` only takes two arguments.
+    as the underlying :func:`ufunc <power>` takes only two arguments.
 
   - The three division operators are all defined; :obj:`div` is active
     by default, :obj:`truediv` is active when
@@ -472,7 +477,7 @@ Arithmetic, in-place:
   the array. Therefore, for mixed precision calculations, ``A {op}= B``
   can be different than ``A = A {op} B``. For example, suppose ``a =
   ones((3,3))``. Then, ``a += 3j`` is different than ``a = a +
-  3j``: While they both perform the same computation, ``a += 3``
+  3j``: while they both perform the same computation, ``a += 3j``
   casts the result to fit back in ``a``, whereas ``a = a + 3j``
   re-binds the name ``a`` to the result.
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index 33d5ceff6..75daf2a08 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -204,8 +204,6 @@ elements the data type consists of.)
 :mod:`struct` module.
 
-.. note:: XXX: what to put in the type docstrings, and where to put them?
-
 Attributes
 ==========
@@ -235,7 +233,6 @@ attribute. Otherwise, they share the same attributes as arrays:
    generic.__array_priority__
    generic.__array_wrap__
 
-.. note:: XXX: import the documentation into the docstrings?
 
 Indexing
 ========
@@ -273,7 +270,6 @@ The exceptions to the above rules are given below:
    generic.__setstate__
    generic.setflags
 
-.. note:: XXX: import the documentation into the docstrings?
 
 Defining new types
 ==================
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 654201d73..8ce362079 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -251,14 +251,16 @@ From other objects
 .. cfunction:: PyObject* PyArray_FromAny(PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, int requirements, PyObject* context)
 
    This is the main function used to obtain an array from any nested
-   sequence, or object that exposes the array interface, ``op``. The
-   parameters allow specification of the required *type*, the
+   sequence, or object that exposes the array interface, *op*. The
+   parameters allow specification of the required *dtype*, the
    minimum (*min_depth*) and maximum (*max_depth*) number of
   dimensions acceptable, and other *requirements* for the array. The
   *dtype* argument needs to be a :ctype:`PyArray_Descr` structure
   indicating the desired data-type (including required
   byteorder). The *dtype* argument may be NULL, indicating that any
-   data-type (and byteorder) is acceptable. If you want to use
+   data-type (and byteorder) is acceptable. Unless ``FORCECAST`` is
+   present in ``flags``, this call will generate an error if the data
+   type cannot be safely obtained from the object. If you want to use
   ``NULL`` for the *dtype* and ensure the array is not swapped, then use
   :cfunc:`PyArray_CheckFromAny`. A value of 0 for either of the
   depth parameters causes the parameter to be ignored. Any of the
@@ -270,7 +272,8 @@ From other objects
   filled from *op* using the sequence protocol). The new array will
   have :cdata:`NPY_DEFAULT` as its flags member.
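+   From Python, a loose analogue of these requirement flags is
+   :func:`numpy.require`, which returns an array satisfying a list of
+   requirements (an illustration of the semantics only, not of this C call)::
+
+      >>> import numpy as np
+      >>> a = np.arange(6).reshape(2, 3).T     # a Fortran-ordered view
+      >>> b = np.require(a, np.float64, ['C', 'ALIGNED'])
+      >>> b.flags['C_CONTIGUOUS']
+      True
+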
   The *context* argument is passed to the :obj:`__array__`
   method of *op* and is only used if
-   the array is constructed that way.
+   the array is constructed that way. Almost always this
+   parameter is ``NULL``.
 
 .. cvar:: NPY_C_CONTIGUOUS
@@ -1001,6 +1004,24 @@ Special functions for PyArray_OBJECT
 Array flags
 -----------
 
+The ``flags`` attribute of the ``PyArrayObject`` structure contains
+important information about the memory used by the array (pointed to
+by the data member). This flag information must be kept accurate, or
+strange results and even segfaults may result.
+
+There are 6 (binary) flags that describe the memory area used by the
+data buffer. These constants are defined in ``arrayobject.h`` and
+determine the bit-position of the flag. Python exposes a nice
+attribute-based interface as well as a dictionary-like interface for
+getting (and, if appropriate, setting) these flags.
+
+Memory areas of all kinds can be pointed to by an ndarray,
+necessitating these flags. If you get an arbitrary ``PyArrayObject``
+in C-code, you need to be aware of the flags that are set. If you
+need to guarantee a certain kind of array (like ``NPY_CONTIGUOUS`` and
+``NPY_BEHAVED``), then pass these requirements into the
+PyArray_FromAny function.
+
 Basic Array Flags
 ^^^^^^^^^^^^^^^^^
@@ -1023,6 +1044,12 @@ associated with an array.
 
   The data area is in Fortran-style contiguous order (first index
   varies the fastest).
 
+Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN``
+contiguous and C contiguous. Both of these flags can be checked; they
+are convenience flags only, since whether or not an array is
+``NPY_CONTIGUOUS`` or ``NPY_FORTRAN`` can be determined from the
+``strides``, ``dimensions``, and ``itemsize`` attributes.
+
 .. cvar:: NPY_OWNDATA
 
   The data area is owned by this array.
@@ -1043,6 +1070,24 @@ associated with an array.
 
   The data area represents a (well-behaved) copy whose information
   should be transferred back to the original when this array is deleted.
 
+   This is a special flag that is set if this array represents a copy
+   made because a user required certain flags in
+   :cfunc:`PyArray_FromAny` and a copy had to be made of some other
+   array (and the user asked for this flag to be set in such a
+   situation). The base attribute then points to the "misbehaved"
+   array (which is made read-only). When the array with this flag set
+   is deallocated, it will copy its contents back to the "misbehaved"
+   array (casting if necessary) and will reset the "misbehaved" array
+   to :cdata:`NPY_WRITEABLE`. If the "misbehaved" array was not
+   :cdata:`NPY_WRITEABLE` to begin with, then :cfunc:`PyArray_FromAny`
+   would have returned an error because :cdata:`NPY_UPDATEIFCOPY`
+   would not have been possible.
+
+:cfunc:`PyArray_UpdateFlags` (obj, flags) will update the
+``obj->flags`` for ``flags`` which can be any of
+:cdata:`NPY_CONTIGUOUS`, :cdata:`NPY_FORTRAN`, :cdata:`NPY_ALIGNED`,
+or :cdata:`NPY_WRITEABLE`.
+
 Combinations of array flags
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1721,6 +1766,29 @@ Array Functions
   *op1*, 2 - return all possible shifts (any overlap at all is
   accepted).
 
+   .. rubric:: Notes
+
+   This does not compute the usual correlation: if op2 is larger than op1, the
+   arguments are swapped, and the conjugate is never taken for complex arrays.
+   See PyArray_Correlate2 for the usual signal processing correlation.
+
+.. cfunction:: PyObject* PyArray_Correlate2(PyObject* op1, PyObject* op2, int mode)
+
+   Updated version of PyArray_Correlate, which uses the usual definition of
+   correlation for 1d arrays. The correlation is computed at each output point
+   by multiplying *op1* by a shifted version of *op2* and summing the result.
+   As a result of the shift, needed values outside of the defined range of
+   *op1* and *op2* are interpreted as zero. The mode determines how many
+   shifts to return: 0 - return only shifts that did not need to assume zero
+   values; 1 - return an object that is the same size as *op1*; 2 - return all
+   possible shifts (any overlap at all is accepted).
+
+   .. rubric:: Notes
+
+   Compute z as follows::
+
+     z[k] = sum_n op1[n] * conj(op2[n+k])
+
 .. cfunction:: PyObject* PyArray_Where(PyObject* condition, PyObject* x, PyObject* y)
 
   If both ``x`` and ``y`` are ``NULL``, then return
@@ -1901,6 +1969,96 @@ Broadcasting (multi-iterators)
   loop should be performed over the axis that won't require large
   stride jumps.
 
+Neighborhood iterator
+---------------------
+
+.. versionadded:: 1.4.0
+
+Neighborhood iterators are subclasses of the iterator object, and can be used
+to iterate over a neighborhood of a point. For example, you may want to
+iterate over every voxel of a 3d image, and for every such voxel, iterate over
+a hypercube. Neighborhood iterators automatically handle boundaries, thus
+making this kind of code much easier to write than manual boundary handling,
+at the cost of a slight overhead.
+
+.. cfunction:: PyObject* PyArray_NeighborhoodIterNew(PyArrayIterObject* iter, npy_intp* bounds, int mode, PyArrayObject* fill_value)
+
+   This function creates a new neighborhood iterator from an existing
+   iterator. The neighborhood will be computed relative to the position
+   currently pointed to by *iter*, the bounds define the shape of the
+   neighborhood iterator, and the mode argument sets the boundary handling
+   mode.
+
+   The *bounds* argument is expected to be an array of size 2 * iter->ao->nd,
+   such that the range bounds[2*i] .. bounds[2*i+1] defines the range where
+   to walk for dimension i (both bounds are included in the walked
+   coordinates). The bounds should be ordered for each dimension
+   (bounds[2*i] <= bounds[2*i+1]).
+
+   The mode should be one of:
+
+   * NPY_NEIGHBORHOOD_ITER_ZERO_PADDING: zero padding. Outside bounds values
+     will be 0.
+   * NPY_NEIGHBORHOOD_ITER_ONE_PADDING: one padding. Outside bounds values
+     will be 1.
+   * NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING: constant padding. Outside bounds
+     values will be the same as the first item in fill_value.
+   * NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING: mirror padding. Outside bounds
+     values will be as if the array items were mirrored. For example, for the
+     array [1, 2, 3, 4], x[-2] will be 2, x[-1] will be 1, x[4] will be 4,
+     x[5] will be 3, etc...
+   * NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING: circular padding. Outside bounds
+     values will be as if the array was repeated. For example, for the
+     array [1, 2, 3, 4], x[-2] will be 3, x[-1] will be 4, x[4] will be 1,
+     x[5] will be 2, etc...
+
+   If the mode is constant filling (NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING),
+   fill_value should point to an array object which holds the filling value
+   (the first item will be the filling value if the array contains more than
+   one item). For other cases, fill_value may be NULL.
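+   There is no direct Python binding for this iterator, but the circular mode
+   can be previewed with ``numpy.take``, whose ``mode='wrap'`` argument wraps
+   out-of-range indices in the same way (an analogy only)::
+
+      >>> import numpy as np
+      >>> x = np.array([1, 2, 3, 4])
+      >>> np.take(x, [-2, -1, 0, 1, 2, 3, 4, 5], mode='wrap')
+      array([3, 4, 1, 2, 3, 4, 1, 2])
+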
+
+   - The iterator holds a reference to iter
+   - Return NULL on failure (in which case the reference count of iter is not
+     changed)
+   - iter itself can be a Neighborhood iterator: this can be useful for e.g.
+     automatic boundary handling
+   - the object returned by this function should be safe to use as a normal
+     iterator
+   - If the position of iter is changed, any subsequent call to
+     PyArrayNeighborhoodIter_Next is undefined behavior, and
+     PyArrayNeighborhoodIter_Reset must be called.
+
+   .. code-block:: c
+
+      PyArrayIterObject *iter;
+      PyArrayNeighborhoodIterObject *neigh_iter;
+      npy_intp bounds[] = {-1, 1, -1, 1};   /* a 3x3 kernel around each point */
+      npy_intp i, j;
+
+      iter = (PyArrayIterObject*)PyArray_IterNew(x);
+      neigh_iter = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
+              iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
+
+      for (i = 0; i < iter->size; ++i) {
+          for (j = 0; j < neigh_iter->size; ++j) {
+              /* Walk around the item currently pointed to by iter->dataptr */
+              PyArrayNeighborhoodIter_Next(neigh_iter);
+          }
+
+          /* Move to the next point of iter */
+          PyArray_ITER_NEXT(iter);
+          PyArrayNeighborhoodIter_Reset(neigh_iter);
+      }
+
+.. cfunction:: int PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+
+   Reset the iterator position to the first point of the neighborhood. This
+   should be called whenever the position of the *iter* argument given to
+   PyArray_NeighborhoodIterNew changes (see the example above).
+
+.. cfunction:: int PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+
+   After this call, iter->dataptr points to the next point of the
+   neighborhood. Calling this function after every point of the
+   neighborhood has been visited is undefined.
 
 Array Scalars
 -------------
diff --git a/doc/source/reference/c-api.config.rst b/doc/source/reference/c-api.config.rst
index 0c7f6b147..0989c53d7 100644
--- a/doc/source/reference/c-api.config.rst
+++ b/doc/source/reference/c-api.config.rst
@@ -89,8 +89,8 @@ Platform information
   .. versionadded:: 1.3.0
 
   Portable alternatives to the ``endian.h`` macros of GNU Libc.
-   One of :cdata:`NPY_BIG_ENDIAN` :cdata:`NPY_LITTLE_ENDIAN` or
-   is defined, and :cdata:`NPY_BYTE_ORDER` is either 4321 or 1234.
+   If big endian, :cdata:`NPY_BYTE_ORDER` == :cdata:`NPY_BIG_ENDIAN`, and
+   similarly for little endian architectures.
 
   Defined in ``numpy/npy_endian.h``.
diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api.coremath.rst
index 8e6011603..b1a356551 100644
--- a/doc/source/reference/c-api.coremath.rst
+++ b/doc/source/reference/c-api.coremath.rst
@@ -69,6 +69,14 @@ Floating point classification
   and extended precision, and return a non-zero value if x has the signbit
   set (that is, the number is negative).
 
+.. cfunction:: double npy_copysign(double x, double y)
+
+   This is a function equivalent to C99 copysign: returns x with the same sign
+   as y. Works for any value, including inf and nan. Single and extended
+   precisions are available with the suffixes f and l.
+
+   .. versionadded:: 1.4.0
+
 Useful math constants
 ~~~~~~~~~~~~~~~~~~~~~
@@ -114,3 +122,22 @@ precision are also available by adding the F and L suffixes
 respectively.
 
 .. cvar:: NPY_2_PI
 
   Two times the reciprocal of pi (:math:`\frac{2}{\pi}`)
+
+.. cvar:: NPY_EULER
+
+   The Euler constant (:math:`\lim_{n\rightarrow \infty}{\sum_{k=1}^n{\frac{1}{k}} - \ln n}`)
+
+Linking against the core math library in an extension
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.4.0
+
+To use the core math library in your own extension, you need to add the npymath
+compile and link options to your extension in your setup.py:
+
+    >>> from numpy.distutils.misc_util import get_info
+    >>> info = get_info('npymath')
+    >>> config.add_extension('foo', sources=['foo.c'], extra_info=info)
+
+In other words, the usage of info is exactly the same as when using blas_info
+and friends.
diff --git a/doc/source/reference/generalized_ufuncs.rst b/doc/source/reference/c-api.generalized-ufuncs.rst
index d9f3818b9..870e5dbc4
--- a/doc/source/reference/generalized_ufuncs.rst
+++ b/doc/source/reference/c-api.generalized-ufuncs.rst
@@ -1,6 +1,6 @@
-===============================
-Generalized Universal Functions
-===============================
+==================================
+Generalized Universal Function API
+==================================
 
 There is a general need for looping over not only functions on
 scalars but also over functions on vectors (or arrays), as explained
 on
@@ -91,14 +91,14 @@ dimensions.
 
 The signature is represented by a string of the following format:
 
 * Core dimensions of each input or output array are represented by a
-  list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar
+  list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar
   input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``,
   etc, one can use any valid Python variable name.
 * Dimension lists for different arguments are separated by ``","``.
   Input/output arguments are separated by ``"->"``.
 * If one uses the same dimension name in multiple locations, this
   enforces the same size (or broadcastable size) of the corresponding
-  dimensions.
+  dimensions.
 
 The formal syntax of signatures is as follows::
diff --git a/doc/source/reference/c-api.rst b/doc/source/reference/c-api.rst
index 158e04a16..9bcc68b49 100644
--- a/doc/source/reference/c-api.rst
+++ b/doc/source/reference/c-api.rst
@@ -45,4 +45,5 @@ code.
    c-api.dtype
    c-api.array
    c-api.ufunc
+   c-api.generalized-ufuncs
    c-api.coremath
diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst
index b99702e11..ece7f341b 100644
--- a/doc/source/reference/c-api.types-and-structures.rst
+++ b/doc/source/reference/c-api.types-and-structures.rst
@@ -917,6 +917,18 @@ PyArrayMultiIter_Type
   to be broadcast together. On return, the iterators are adjusted for
   broadcasting.
 
+PyArrayNeighborhoodIter_Type
+----------------------------
+
+.. cvar:: PyArrayNeighborhoodIter_Type
+
+   This is an iterator object that makes it easy to loop over an N-dimensional
+   neighborhood.
+
+.. ctype:: PyArrayNeighborhoodIterObject
+
+   The C-structure corresponding to an object of :cdata:`PyArrayNeighborhoodIter_Type` is
+   the :ctype:`PyArrayNeighborhoodIterObject`.
 
 PyArrayFlags_Type
 -----------------
diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst
index bd0ee8e02..6a6a0dff0 100644
--- a/doc/source/reference/c-api.ufunc.rst
+++ b/doc/source/reference/c-api.ufunc.rst
@@ -70,43 +70,45 @@ Functions
   operation. Each ufunc object contains pointers to 1-d loops
   implementing the basic functionality for each supported type.
 
-   :param nin:
-
-      The number of inputs to this operation.
-
-   :param nout:
-
-      The number of outputs
-
-   :param ntypes:
+   .. note::
 
-      How many different data-type "signatures" the ufunc has implemented.
+      The *func*, *data*, *types*, *name*, and *doc* arguments are not
+      copied by :cfunc:`PyUFunc_FromFuncAndData`. The caller must ensure
The caller must ensure + that the memory used by these arrays is not freed as long as the + ufunc object is alive. :param func: - Must to an array of length *ntypes* containing :ctype:`PyUFuncGenericFunction` items. These items are pointers to - functions that acutally implement the underlying - (element-by-element) function :math:`N` times. T + functions that actually implement the underlying + (element-by-element) function :math:`N` times. - :param types: + :param data: + Should be ``NULL`` or a pointer to an array of size *ntypes* + . This array may contain arbitrary extra-data to be passed to + the corresponding 1-d loop function in the func array. + :param types: Must be of length (*nin* + *nout*) \* *ntypes*, and it contains the data-types (built-in only) that the corresponding function in the *func* array can deal with. - :param data: + :param ntypes: + How many different data-type "signatures" the ufunc has implemented. - Should be ``NULL`` or a pointer to an array of size *ntypes* - . This array may contain arbitrary extra-data to be passed to - the corresponding 1-d loop function in the func array. + :param nin: + The number of inputs to this operation. - :param name: + :param nout: + The number of outputs + :param identity: + XXX: Undocumented + + :param name: The name for the ufunc. :param doc: - Allows passing in a documentation string to be stored with the ufunc. The documentation string should not contain the name of the function or the calling signature as that will be @@ -114,7 +116,6 @@ Functions accessing the **__doc__** attribute of the ufunc. :param check_return: - Unused and present for backwards compatibility of the C-API. A corresponding *check_return* integer does exist in the ufunc structure and it does get set with this value when the ufunc diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst index 051a1c031..bb01a529a 100644 --- a/doc/source/reference/distutils.rst +++ b/doc/source/reference/distutils.rst @@ -67,223 +67,49 @@ misc_util files in the numpy distribution are good examples of how to use the :class:`Configuration` instance. - .. method:: todict() - - Return a dictionary compatible with the keyword arguments of distutils - setup function. Thus, this method may be used as - setup(\**config.todict()). - - .. method:: get_distribution() - - Return the distutils distribution object for self. - - .. method:: get_subpackage(subpackage_name, subpackage_path=None) - - Return a Configuration instance for the sub-package given. If - subpackage_path is None then the path is assumed to be the local path - plus the subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - - .. method:: add_subpackage(subpackage_name, subpackage_path=None) - - Add a sub-package to the current Configuration instance. This is - useful in a setup.py script for adding sub-packages to a package. The - sub-package is contained in subpackage_path / subpackage_name and this - directory may contain a setup.py script or else a default setup - (suitable for Python-code-only subpackages) is assumed. If the - subpackage_path is None, then it is assumed to be located in the local - path / subpackage_name. - - .. method:: self.add_data_files(*files) - - Add files to the list of data_files to be included with the package. 
- The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. - Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. An example may clarify:: - - self.add_data_files('foo.dat', - ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - 'bar/cat.dat', - '/full/path/to/can.dat') - - will install these data files to:: - - <package install directory>/ - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where <package install directory> is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: \\Python2.4 \\Lib \\site-packages \\mypackage') or '/usr/lib/python2.4/site- - packages/mypackage/mysubpackage' ('C: \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). - - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. - - .. method:: add_data_dir(data_path) - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. For example suppose the source directory - contains fun/foo.dat and fun/bar/car.dat:: - - self.add_data_dir('fun') - self.add_data_dir(('sun', 'fun')) - self.add_data_dir(('gun', '/full/path/to/fun')) - - Will install data-files to the locations:: - - <package install directory>/ - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - .. method:: add_include_dirs(*paths) - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - - .. method:: add_headers(*files) - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under <python- - include>/<self.name.replace('.','/')>/ directory. 
If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the <python-include> path. - - .. method:: add_extension(name, sources, **kw) - - Create and add an Extension instance to the ext_modules list. The - first argument defines the name of the extension module that will be - installed under the self.name package. The second argument is a list - of sources. This method also takes the following optional keyword - arguments that are passed on to the Extension constructor: - include_dirs, define_macros, undef_macros, library_dirs, libraries, - runtime_library_dirs, extra_objects, swig_opts, depends, language, - f2py_options, module_dirs, and extra_info. - - The self.paths(...) method is applied to all lists that may contain - paths. The extra_info is a dictionary or a list of dictionaries whose - content will be appended to the keyword arguments. The depends list - contains paths to files or directories that the sources of the - extension module depend on. If any path in the depends list is newer - than the extension module, then the module will be rebuilt. - - The list of sources may contain functions (called source generators) - which must take an extension instance and a build directory as inputs - and return a source file or list of source files or None. If None is - returned then no sources are generated. If the Extension instance has - no sources after processing all source generators, then no extension - module is built. + .. automethod:: todict - .. method:: add_library(name, sources, **build_info) + .. automethod:: get_distribution - Add a library to the list of libraries. Allowed keyword arguments are - depends, macros, include_dirs, extra_compiler_args, and f2py_options. - The name is the name of the library to be built and sources is a list - of sources (or source generating functions) to add to the library. + .. automethod:: get_subpackage - .. method:: add_scripts(*files) + .. automethod:: add_subpackage - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the <prefix>/bin/ directory. + .. automethod:: add_data_files - .. method:: paths(*paths) + .. automethod:: add_data_dir - Applies glob.glob(...) to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. + .. automethod:: add_include_dirs - .. method:: get_config_cmd() + .. automethod:: add_headers - Returns the numpy.distutils config command instance. + .. automethod:: add_extension - .. method:: get_build_temp_dir() + .. automethod:: add_library - Return a path to a temporary directory where temporary files should be - placed. + .. automethod:: add_scripts - .. method:: have_f77c() + .. automethod:: add_installed_library - True if a Fortran 77 compiler is available (because a simple Fortran - 77 code was able to be compiled successfully). + .. automethod:: add_npy_pkg_config - .. method:: have_f90c() + .. automethod:: paths - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) + .. automethod:: get_config_cmd - .. method:: get_version() + .. automethod:: get_build_temp_dir - Return a version string of the current package or None if the version - information could not be detected. 
      This method scans files named
-      __version__.py, <packagename>_version.py, version.py, and
-      __svn_version__.py for string variables version, __version\__, and
-      <packagename>_version, until a version number is found.
+   .. automethod:: have_f77c
 
-   .. method:: make_svn_version_py()
+   .. automethod:: have_f90c
 
-      Appends a data function to the data_files list that will generate
-      __svn_version__.py file to the current package directory. This file
-      will be removed from the source directory when Python exits (so that
-      it can be re-generated next time the package is built). This is
-      intended for working with source directories that are in an SVN
-      repository.
+   .. automethod:: get_version
 
-   .. method:: make_config_py()
+   .. automethod:: make_svn_version_py
 
-      Generate a package __config__.py file containing system information
-      used during the building of the package. This file is installed to the
-      package installation directory.
 
-   .. method:: get_info(*names)
-
-      Return information (from system_info.get_info) for all of the names in
-      the argument list in a single dictionary.
+   .. automethod:: make_config_py
 
+   .. automethod:: get_info
 
 Other modules
 -------------
@@ -299,6 +125,83 @@ Other modules
    log.set_verbosity
    exec_command
 
+Building installable C libraries
+================================
+
+Conventional C libraries (added through add_library) are not installed, and
+are just used during the build (they are statically linked). An installable C
+library is a pure C library, which does not depend on the Python C runtime,
+and is installed such that it may be used by third-party packages. To build
+and install the C library, you just use the method add_installed_library
+instead of add_library, which takes the same arguments except for an
+additional install_dir argument::
+
+    >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib')
+
+npy-pkg-config files
+--------------------
+
+To make the necessary build options available to third parties, you can use
+the npy-pkg-config mechanism implemented in numpy.distutils. This mechanism is
+based on an .ini file which contains all the options. A .ini file is very
+similar to the .pc files used by the pkg-config unix utility::
+
+    [meta]
+    Name: foo
+    Version: 1.0
+    Description: foo library
+
+    [variables]
+    prefix = /home/user/local
+    libdir = ${prefix}/lib
+    includedir = ${prefix}/include
+
+    [default]
+    cflags = -I${includedir}
+    libs = -L${libdir} -lfoo
+
+Generally, the file needs to be generated during the build, since it needs
+some information known at build time only (e.g. prefix). This is mostly
+automatic if one uses the Configuration method add_npy_pkg_config. Assuming we
+have a template file foo.ini.in as follows::
+
+    [meta]
+    Name: foo
+    Version: @version@
+    Description: foo library
+
+    [variables]
+    prefix = @prefix@
+    libdir = ${prefix}/lib
+    includedir = ${prefix}/include
+
+    [default]
+    cflags = -I${includedir}
+    libs = -L${libdir} -lfoo
+
+and the following code in setup.py::
+
+    >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib')
+    >>> subst = {'version': '1.0'}
+    >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst)
+
+This will install the file foo.ini into the directory package_dir/lib, and the
+foo.ini file will be generated from foo.ini.in, where each @version@ will be
+replaced by subst_dict['version'].
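+Putting the pieces together, a hypothetical, self-contained setup.py for the
+foo library above might look like the following sketch (it assumes foo.c and
+foo.ini.in exist next to it; the package name is illustrative)::
+
+    # sketch: build an installable C library plus its npy-pkg-config file
+    from numpy.distutils.misc_util import Configuration
+
+    def configuration(parent_package='', top_path=None):
+        config = Configuration('mypkg', parent_package, top_path)
+        # compile foo.c into a static library installed under mypkg/lib
+        config.add_installed_library('foo', sources=['foo.c'], install_dir='lib')
+        # generate foo.ini from the foo.ini.in template, filling in @version@
+        config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict={'version': '1.0'})
+        return config
+
+    if __name__ == '__main__':
+        from numpy.distutils.core import setup
+        setup(configuration=configuration)
+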
+The substitution dictionary automatically gets an additional prefix rule,
+which contains the install prefix (since this is not easy to get from
+setup.py). npy-pkg-config files can also be installed at the same location as
+used for numpy, using the path returned by the get_npy_pkg_dir function.
+
+Reusing a C library from another package
+----------------------------------------
+
+Build information is easily retrieved through the get_info function in
+numpy.distutils.misc_util::
+
+    >>> info = get_info('npymath')
+    >>> config.add_extension('foo', sources=['foo.c'], extra_info=info)
+
+An additional list of paths to look for .ini files can be given to get_info.
 
 Conversion of ``.src`` files
 ============================
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index 0d83053ac..00317a18e 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -19,7 +19,6 @@ For learning how to use NumPy, see also :ref:`user`.
 
    arrays
    ufuncs
-   generalized_ufuncs
    routines
    ctypes
    distutils
@@ -37,8 +36,6 @@ the functions are written by numerous contributors and developers of
 Numpy, both prior to and during the
 `Numpy Documentation Marathon
 <http://scipy.org/Developer_Zone/DocMarathon2008>`__.
 
-The Documentation Marathon is still ongoing. Please help us write
-better documentation for Numpy by joining it! Instructions on how to
-join and what to do can be found
+Please help to improve NumPy's documentation! Instructions on how to
+join the ongoing documentation marathon can be found
 `on the scipy.org website <http://scipy.org/Developer_Zone/DocMarathon2008>`__
-
diff --git a/doc/source/reference/routines.indexing.rst b/doc/source/reference/routines.indexing.rst
index f618fa0a4..9d8fde882 100644
--- a/doc/source/reference/routines.indexing.rst
+++ b/doc/source/reference/routines.indexing.rst
@@ -21,6 +21,13 @@ Generating index arrays
    ix_
    ogrid
    unravel_index
+   diag_indices
+   diag_indices_from
+   mask_indices
+   tril_indices
+   tril_indices_from
+   triu_indices
+   triu_indices_from
 
 Indexing-like operations
 ------------------------
@@ -42,6 +49,7 @@ Inserting data into arrays
    place
    put
    putmask
+   fill_diagonal
 
 Iterating over arrays
 ---------------------
diff --git a/doc/source/reference/routines.set.rst b/doc/source/reference/routines.set.rst
index 4c298e80f..27c6aeb89 100644
--- a/doc/source/reference/routines.set.rst
+++ b/doc/source/reference/routines.set.rst
@@ -8,16 +8,15 @@ Making proper sets
 .. autosummary::
    :toctree: generated/
 
-   unique1d
+   unique
 
 Boolean operations
 ------------------
 .. autosummary::
    :toctree: generated/
 
+   in1d
    intersect1d
-   intersect1d_nu
    setdiff1d
-   setmember1d
    setxor1d
    union1d
diff --git a/doc/source/reference/routines.statistics.rst b/doc/source/reference/routines.statistics.rst
index b41b62839..578cbd09a 100644
--- a/doc/source/reference/routines.statistics.rst
+++ b/doc/source/reference/routines.statistics.rst
@@ -35,6 +35,7 @@ Correlating
    :toctree: generated/
 
    corrcoef
+   acorrelate
    correlate
    cov
diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst
index 5a5f4cb8c..075c1e3a9 100644
--- a/doc/source/reference/routines.testing.rst
+++ b/doc/source/reference/routines.testing.rst
@@ -29,6 +29,7 @@ Decorators
autosummary:: :toctree: generated/

+   decorators.deprecated
   decorators.knownfailureif
   decorators.setastest
   decorators.skipif

diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index d63486342..8096e1497 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -102,19 +102,24 @@ Output type determination

The output of the ufunc (and its methods) is not necessarily an :class:`ndarray`, if all input arguments are not :class:`ndarrays <ndarray>`.

-All output arrays will be passed to the :obj:`__array_wrap__`
-method of the input (besides :class:`ndarrays <ndarray>`, and scalars)
-that defines it **and** has the highest :obj:`__array_priority__` of
-any other input to the universal function. The default
-:obj:`__array_priority__` of the ndarray is 0.0, and the default
-:obj:`__array_priority__` of a subtype is 1.0. Matrices have
-:obj:`__array_priority__` equal to 10.0.
+All output arrays will be passed to the :obj:`__array_prepare__` and
+:obj:`__array_wrap__` methods of the input (besides
+:class:`ndarrays <ndarray>`, and scalars) that defines it **and** has
+the highest :obj:`__array_priority__` of any other input to the
+universal function. The default :obj:`__array_priority__` of the
+ndarray is 0.0, and the default :obj:`__array_priority__` of a subtype
+is 1.0. Matrices have :obj:`__array_priority__` equal to 10.0.

The ufuncs can also all take output arguments. The output will be cast if necessary to the provided output array. If a class with an :obj:`__array__` method is used for the output, results will be written to the object returned by :obj:`__array__`. Then, if the class
-also has an :obj:`__array_wrap__` method, the returned
+also has an :obj:`__array_prepare__` method, it is called so metadata
+may be determined based on the context of the ufunc (the context
+consisting of the ufunc itself, the arguments passed to the ufunc, and
+the ufunc domain). The array object returned by
+:obj:`__array_prepare__` is passed to the ufunc for computation.
+Finally, if the class also has an :obj:`__array_wrap__` method, the returned
:class:`ndarray` result will be passed to that method just before passing control back to the caller.

@@ -549,6 +554,7 @@ single operation.

   isinf
   isnan
   signbit
+   copysign
   modf
   ldexp
   frexp

diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 8d8812c80..a9945a0d1 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -19,9 +19,9 @@ and classes, see :ref:`reference`.

.. toctree::
   :maxdepth: 2

+   install
   howtofind
   basics
   performance
   misc
   c-info
-

diff --git a/doc/source/user/install.rst b/doc/source/user/install.rst new file mode 100644 index 000000000..472ee20e3 --- /dev/null +++ b/doc/source/user/install.rst @@ -0,0 +1,148 @@

+*****************************
+Building and installing NumPy
+*****************************
+
+Binary installers
+=================
+
+In most use cases the best way to install NumPy on your system is by using an
+installable binary package for your operating system.
+
+Windows
+-------
+
+Good solutions for Windows are The Enthought Python Distribution `(EPD)
+<http://www.enthought.com/products/epd.php>`_ (which provides binary installers
+for Windows, OS X and Redhat) and `Python (x, y) <http://www.pythonxy.com>`_.
+Both of these packages include Python, NumPy and many additional packages.
+A lightweight alternative is to download the Python installer from
+`www.python.org <http://www.python.org>`_ and the NumPy installer for your
+Python version from the Sourceforge `download site
+<http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103>`_.
+
+Linux
+-----
+
+Most of the major distributions provide packages for NumPy, but these can lag
+behind the most recent NumPy release. Pre-built binary packages for Ubuntu are
+available on the `scipy ppa <https://edge.launchpad.net/~scipy/+archive/ppa>`_.
+Redhat binaries are available in the `EPD
+<http://www.enthought.com/products/epd.php>`_.
+
+Mac OS X
+--------
+
+A universal binary installer for NumPy is available from the `download site
+<http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103>`_.
+The `EPD <http://www.enthought.com/products/epd.php>`_ provides NumPy binaries.
+
+Building from source
+====================
+
+A general overview of building NumPy from source is given here, with detailed
+instructions for specific platforms given separately.
+
+Prerequisites
+-------------
+
+Building NumPy requires the following software installed:
+
+1) Python 2.4.x, 2.5.x or 2.6.x
+
+   On Debian and derivatives (Ubuntu): python, python-dev
+
+   On Windows: the official python installer at
+   `www.python.org <http://www.python.org>`_ is enough
+
+   Make sure that the Python package distutils is installed before
+   continuing. For example, in Debian GNU/Linux, distutils is included
+   in the python-dev package.
+
+   Python must also be compiled with the zlib module enabled.
+
+2) Compilers
+
+   To build any extension modules for Python, you'll need a C compiler. Various
+   NumPy modules use FORTRAN 77 libraries, so you'll also need a FORTRAN 77
+   compiler installed.
+
+   Note that NumPy is developed mainly using GNU compilers. Compilers from other
+   vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Portland, Lahey, HP,
+   IBM, Microsoft are only supported in the form of community feedback, and may
+   not work out of the box. GCC 3.x (and later) compilers are recommended.
+
+3) Linear Algebra libraries
+
+   NumPy does not require any external linear algebra libraries to be installed.
+   However, if these are available, NumPy's setup script can detect them and use
+   them for building. A number of different LAPACK library setups can be used,
+   including optimized LAPACK libraries such as ATLAS, MKL or the
+   Accelerate/vecLib framework on OS X.
+
+FORTRAN ABI mismatch
+--------------------
+
+The two most popular open source fortran compilers are g77 and gfortran.
+Unfortunately, they are not ABI compatible, which means that, concretely, you
+should avoid mixing libraries built with one compiler with libraries built
+with the other. In particular, if your blas/lapack/atlas is built with g77,
+you *must* use g77 when building numpy and scipy; conversely, if your atlas is
+built with gfortran, you *must* build numpy/scipy with gfortran. This applies
+to most other cases where different FORTRAN compilers might have been used.
+
+Choosing the fortran compiler
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To build with g77::
+
+    python setup.py build --fcompiler=gnu
+
+To build with gfortran::
+
+    python setup.py build --fcompiler=gnu95
+
+For more information see::
+
+    python setup.py build --help-fcompiler
+
+How to check the ABI of blas/lapack/atlas
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One relatively simple and reliable way to check for the compiler used to build
+a library is to use ldd on the library. If libg2c.so is a dependency, this
+means that g77 has been used. If libgfortran.so is a dependency, gfortran has
+been used. If both are dependencies, both have been used, which is almost
+always a very bad idea.
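The check above is easy to script. A minimal sketch, assuming a POSIX system
with ldd available and using a hypothetical library path (not one taken from
this document)::

    import subprocess

    def fortran_abi(libpath):
        # Inspect the dynamic dependencies reported by ldd
        p = subprocess.Popen(['ldd', libpath], stdout=subprocess.PIPE)
        deps = p.communicate()[0]
        uses_g77 = 'libg2c.so' in deps
        uses_gfortran = 'libgfortran.so' in deps
        if uses_g77 and uses_gfortran:
            return 'both (almost always a very bad idea)'
        elif uses_g77:
            return 'g77'
        elif uses_gfortran:
            return 'gfortran'
        return 'unknown'

    print fortran_abi('/usr/lib/liblapack.so')  # hypothetical path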
+Building with ATLAS support
+---------------------------
+
+Ubuntu 8.10 (Intrepid)
+~~~~~~~~~~~~~~~~~~~~~~
+
+You can install the necessary packages for optimized ATLAS with this command::
+
+    sudo apt-get install libatlas-base-dev
+
+If you have a recent CPU with SIMD support (SSE, SSE2, etc...), you should
+also install the corresponding package for optimal performance. For example,
+for SSE2::
+
+    sudo apt-get install libatlas3gf-sse2
+
+*NOTE*: Intrepid changed its default fortran compiler to gfortran, so if you
+build your own atlas on Intrepid you should rebuild everything from scratch,
+including lapack.
+
+Ubuntu 8.04 and lower
+~~~~~~~~~~~~~~~~~~~~~
+
+You can install the necessary packages for optimized ATLAS with this command::
+
+    sudo apt-get install atlas3-base-dev
+
+If you have a recent CPU with SIMD support (SSE, SSE2, etc...), you should
+also install the corresponding package for optimal performance. For example,
+for SSE2::
+
+    sudo apt-get install atlas3-sse2

diff --git a/doc/sphinxext/README.txt b/doc/sphinxext/README.txt new file mode 100644 index 000000000..96477033b --- /dev/null +++ b/doc/sphinxext/README.txt @@ -0,0 +1,26 @@

+=====================================
+numpydoc -- Numpy's Sphinx extensions
+=====================================
+
+Numpy's documentation uses several custom extensions to Sphinx. These
+are shipped in this ``numpydoc`` package, in case you want to make use
+of them in third-party projects.
+
+The following extensions are available:
+
+  - ``numpydoc``: support for the Numpy docstring format in Sphinx, adding
+    the code description directives ``np-function``, ``np-cfunction``, etc.
+    that support the Numpy docstring syntax.
+
+  - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
+
+  - ``numpydoc.plot_directives``: Adaptation of Matplotlib's ``plot::``
+    directive. Note that this implementation may still undergo severe
+    changes or eventually be deprecated.
+
+  - ``numpydoc.only_directives``: (DEPRECATED)
+
+  - ``numpydoc.autosummary``: (DEPRECATED) An ``autosummary::`` directive.
+    Available in Sphinx 0.6.2 and (to-be) 1.0 as ``sphinx.ext.autosummary``;
+    the Sphinx 1.0 version is recommended over the one included in Numpydoc.

diff --git a/doc/sphinxext/docscrape.py b/doc/sphinxext/docscrape.py index f374b3ddc..0e073da93 100644 --- a/doc/sphinxext/docscrape.py +++ b/doc/sphinxext/docscrape.py @@ -8,7 +8,7 @@ import re import pydoc from StringIO import StringIO from warnings import warn -4 + class Reader(object): """A line-based string reader.
@@ -386,6 +386,8 @@ class NumpyDocString(object): out += self._str_see_also(func_role) for s in ('Notes','References','Examples'): out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) @@ -477,21 +479,19 @@ class ClassDoc(NumpyDocString): NumpyDocString.__init__(self, doc) + if not self['Methods']: + self['Methods'] = [(name, '', '') for name in sorted(self.methods)] + + if not self['Attributes']: + self['Attributes'] = [(name, '', '') + for name in sorted(self.properties)] + @property def methods(self): return [name for name,func in inspect.getmembers(self._cls) if not name.startswith('_') and callable(func)] - def __str__(self): - out = '' - out += super(ClassDoc, self).__str__() - out += "\n\n" - - #for m in self.methods: - # print "Parsing `%s`" % m - # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n' - # out += '.. index::\n single: %s; %s\n\n' % (self._name, m) - - return out - - + @property + def properties(self): + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and func is None] diff --git a/doc/sphinxext/docscrape_sphinx.py b/doc/sphinxext/docscrape_sphinx.py index 5b60567b3..12907731e 100644 --- a/doc/sphinxext/docscrape_sphinx.py +++ b/doc/sphinxext/docscrape_sphinx.py @@ -44,6 +44,55 @@ class SphinxDocString(NumpyDocString): out += [''] return out + @property + def _obj(self): + if hasattr(self, '_cls'): + return self._cls + elif hasattr(self, '_f'): + return self._f + return None + + def _str_member_list(self, name): + """ + Generate a member listing, autosummary:: table where possible, + and a table where not. + + """ + out = [] + if self[name]: + out += ['.. rubric:: %s' % name, ''] + prefix = getattr(self, '_name', '') + + if prefix: + prefix = '~%s.' % prefix + + autosum = [] + others = [] + for param, param_type, desc in self[name]: + param = param.strip() + if not self._obj or hasattr(self._obj, param): + autosum += [" %s%s" % (prefix, param)] + else: + others.append((param, param_type, desc)) + + if autosum: + out += ['.. autosummary::', ' :toctree:', ''] + out += autosum + + if others: + maxlen_0 = max([len(x[0]) for x in others]) + maxlen_1 = max([len(x[1]) for x in others]) + hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 + fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) + n_indent = maxlen_0 + maxlen_1 + 4 + out += [hdr] + for param, param_type, desc in others: + out += [fmt % (param.strip(), param_type)] + out += self._str_indent(desc, n_indent) + out += [hdr] + out += [''] + return out + def _str_section(self, name): out = [] if self[name]: @@ -95,7 +144,7 @@ class SphinxDocString(NumpyDocString): out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it - if sphinx.__version__ >= 0.6: + if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. 
latexonly::',''] @@ -127,14 +176,15 @@ class SphinxDocString(NumpyDocString): out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Attributes', 'Methods', - 'Returns','Raises'): + for param_list in ('Parameters', 'Returns', 'Raises'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() + for param_list in ('Attributes', 'Methods'): + out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) @@ -144,6 +194,11 @@ class SphinxFunctionDoc(SphinxDocString, FunctionDoc): class SphinxClassDoc(SphinxDocString, ClassDoc): pass +class SphinxObjDoc(SphinxDocString): + def __init__(self, obj, doc): + self._f = obj + SphinxDocString.__init__(self, doc) + def get_doc_object(obj, what=None, doc=None): if what is None: if inspect.isclass(obj): @@ -161,5 +216,4 @@ def get_doc_object(obj, what=None, doc=None): else: if doc is None: doc = pydoc.getdoc(obj) - return SphinxDocString(doc) - + return SphinxObjDoc(obj, doc) diff --git a/doc/sphinxext/numpydoc.py b/doc/sphinxext/numpydoc.py index 846dd7b85..707107daf 100644 --- a/doc/sphinxext/numpydoc.py +++ b/doc/sphinxext/numpydoc.py @@ -18,6 +18,7 @@ It will: import os, re, pydoc from docscrape_sphinx import get_doc_object, SphinxDocString +from sphinx.util.compat import Directive import inspect def mangle_docstrings(app, what, name, obj, options, lines, @@ -25,29 +26,29 @@ def mangle_docstrings(app, what, name, obj, options, lines, if what == 'module': # Strip top title - title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', + title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', re.I|re.S) - lines[:] = title_re.sub('', "\n".join(lines)).split("\n") + lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") else: - doc = get_doc_object(obj, what, "\n".join(lines)) + doc = get_doc_object(obj, what, u"\n".join(lines)) doc.use_plots = app.config.numpydoc_use_plots - lines[:] = str(doc).split("\n") + lines[:] = unicode(doc).split(u"\n") if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): - v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__)) + v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) - lines += ['', '.. htmlonly::', ''] - lines += [' %s' % x for x in + lines += [u'', u'.. htmlonly::', ''] + lines += [u' %s' % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() - m = re.match(r'^.. \[([a-z0-9_.-])\]', line, re.I) + m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) if m: references.append(m.group(1)) @@ -56,14 +57,14 @@ def mangle_docstrings(app, what, name, obj, options, lines, if references: for i, line in enumerate(lines): for r in references: - if re.match(r'^\d+$', r): - new_r = "R%d" % (reference_offset[0] + int(r)) + if re.match(ur'^\d+$', r): + new_r = u"R%d" % (reference_offset[0] + int(r)) else: - new_r = "%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace('[%s]_' % r, - '[%s]_' % new_r) - lines[i] = lines[i].replace('.. [%s]' % r, - '.. [%s]' % new_r) + new_r = u"%s%d" % (r, reference_offset[0]) + lines[i] = lines[i].replace(u'[%s]_' % r, + u'[%s]_' % new_r) + lines[i] = lines[i].replace(u'.. 
[%s]' % r, + u'.. [%s]' % new_r) reference_offset[0] += len(references) @@ -78,8 +79,8 @@ def mangle_signature(app, what, name, obj, options, sig, retann): doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: - sig = re.sub("^[^(]*", "", doc['Signature']) - return sig, '' + sig = re.sub(u"^[^(]*", u"", doc['Signature']) + return sig, u'' def initialize(app): try: @@ -96,6 +97,78 @@ def setup(app, get_doc_object_=get_doc_object): app.add_config_value('numpydoc_edit_link', None, True) app.add_config_value('numpydoc_use_plots', None, False) + # Extra mangling directives + name_type = { + 'cfunction': 'function', + 'cmember': 'attribute', + 'cmacro': 'function', + 'ctype': 'class', + 'cvar': 'object', + 'class': 'class', + 'function': 'function', + 'attribute': 'attribute', + 'method': 'function', + 'staticmethod': 'function', + 'classmethod': 'function', + } + + for name, objtype in name_type.items(): + app.add_directive('np-' + name, wrap_mangling_directive(name, objtype)) + +#------------------------------------------------------------------------------ +# Input-mangling directives +#------------------------------------------------------------------------------ +from docutils.statemachine import ViewList + +def get_directive(name): + from docutils.parsers.rst import directives + try: + return directives.directive(name, None, None)[0] + except AttributeError: + pass + try: + # docutils 0.4 + return directives._directives[name] + except (AttributeError, KeyError): + raise RuntimeError("No directive named '%s' found" % name) + +def wrap_mangling_directive(base_directive_name, objtype): + base_directive = get_directive(base_directive_name) + + if inspect.isfunction(base_directive): + base_func = base_directive + class base_directive(Directive): + required_arguments = base_func.arguments[0] + optional_arguments = base_func.arguments[1] + final_argument_whitespace = base_func.arguments[2] + option_spec = base_func.options + has_content = base_func.content + def run(self): + return base_func(self.name, self.arguments, self.options, + self.content, self.lineno, + self.content_offset, self.block_text, + self.state, self.state_machine) + + class directive(base_directive): + def run(self): + env = self.state.document.settings.env + + name = None + if self.arguments: + m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) + name = m.group(2).strip() + + if not name: + name = self.arguments[0] + + lines = list(self.content) + mangle_docstrings(env.app, objtype, name, None, None, lines) + self.content = ViewList(lines, self.content.parent) + + return base_directive.run(self) + + return directive + #------------------------------------------------------------------------------ # Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5) #------------------------------------------------------------------------------ diff --git a/doc/sphinxext/plot_directive.py b/doc/sphinxext/plot_directive.py index f564cd670..8de8c7399 100644 --- a/doc/sphinxext/plot_directive.py +++ b/doc/sphinxext/plot_directive.py @@ -73,6 +73,7 @@ TODO """ import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback +import sphinx import warnings warnings.warn("A plot_directive module is also available under " @@ -157,7 +158,7 @@ except ImportError: TEMPLATE = """ {{ source_code }} -.. htmlonly:: +{{ only_html }} {% if source_code %} (`Source code <{{ source_link }}>`__) @@ -188,7 +189,7 @@ TEMPLATE = """ ) {% endfor %} -.. latexonly:: +{{ only_latex }} {% for img in images %} .. 
image:: {{ build_dir }}/{{ img.basename }}.pdf

@@ -304,11 +305,20 @@ def run(arguments, content, options, state_machine, state, lineno):
     opts = [':%s: %s' % (key, val) for key, val in options.items()
             if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

+    if sphinx.__version__ >= "0.6":
+        only_html = ".. only:: html"
+        only_latex = ".. only:: latex"
+    else:
+        only_html = ".. htmlonly::"
+        only_latex = ".. latexonly::"
+
     result = format_template(
         TEMPLATE,
         dest_dir=dest_dir_link,
         build_dir=build_dir_link,
         source_link=source_link,
+        only_html=only_html,
+        only_latex=only_latex,
         options=opts,
         images=images,
         source_code=source_code)

diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index 121b8088f..d588cbba0 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -747,12 +747,19 @@ add_newdoc('numpy.core.multiarray', 'set_string_function',

    Parameters
    ----------
-    f : Python function
+    f : function or None
        Function to be used to pretty print arrays. The function should expect
        a single array argument and return a string of the representation of
-        the array.
-    repr : int
-        Unknown.
+        the array. If None, the function is reset to the default NumPy function
+        to print arrays.
+    repr : bool, optional
+        If True (default), the function for pretty printing (``__repr__``)
+        is set; if False, the function that returns the default string
+        representation (``__str__``) is set.
+
+    See Also
+    --------
+    set_printoptions, get_printoptions

    Examples
    --------
@@ -766,6 +773,24 @@ add_newdoc('numpy.core.multiarray', 'set_string_function',
    >>> print a
    [0 1 2 3 4 5 6 7 8 9]

+    We can reset the function to the default:
+
+    >>> np.set_string_function(None)
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'l')
+
+    `repr` affects either pretty printing or normal string representation.
+    Note that ``__repr__`` is still affected by setting ``__str__``
+    because the width of each array element in the returned string becomes
+    equal to the length of the result of ``__str__()``.
+
+    >>> x = np.arange(4)
+    >>> np.set_string_function(lambda x:'random', repr=False)
+    >>> x.__str__()
+    'random'
+    >>> x.__repr__()
+    'array([ 0, 1, 2, 3])'
+
    """)

add_newdoc('numpy.core.multiarray', 'set_numeric_ops', @@ -973,12 +998,37 @@ add_newdoc('numpy.core.multiarray','newbuffer',

    """)

-add_newdoc('numpy.core.multiarray','getbuffer',
-    """getbuffer(obj [,offset[, size]])
+add_newdoc('numpy.core.multiarray', 'getbuffer',
+    """
+    getbuffer(obj [,offset[, size]])

    Create a buffer object from the given object referencing a slice of
-    length size starting at offset. Default is the entire buffer. A
-    read-write buffer is attempted followed by a read-only buffer.
+    length size starting at offset.
+
+    Default is the entire buffer. A read-write buffer is attempted followed
+    by a read-only buffer.
+
+    Parameters
+    ----------
+    obj : object
+
+    offset : int, optional
+
+    size : int, optional
+
+    Returns
+    -------
+    buffer_obj : buffer
+
+    Examples
+    --------
+    >>> buf = np.getbuffer(np.ones(5), 1, 3)
+    >>> len(buf)
+    3
+    >>> buf[0]
+    '\\x00'
+    >>> buf
+    <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>

    """)

@@ -1595,8 +1645,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',

    """))

+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
+    """a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
+
+    """))
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
-    """a.__array_wrap__(obj) -> Object of same type as a from ndarray obj.
+ """a.__array_wrap__(obj) -> Object of same type as ndarray object a. """)) @@ -2668,6 +2724,21 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', type : python type Type of the returned view, e.g. ndarray or matrix. + + Notes + ----- + + `a.view()` is used two different ways. + + `a.view(some_dtype)` or `a.view(dtype=some_dtype)` constructs a view of + the array's memory with a different dtype. This can cause a + reinterpretation of the bytes of memory. + + `a.view(ndarray_subclass)`, or `a.view(type=ndarray_subclass)`, just + returns an instance of ndarray_subclass that looks at the same array (same + shape, dtype, etc.). This does not cause a reinterpretation of the memory. + + Examples -------- >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) @@ -2675,12 +2746,27 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', Viewing array data using a different type and dtype: >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> print y.dtype - int16 - + >>> y + matrix([[513]], dtype=int16) >>> print type(y) <class 'numpy.core.defmatrix.matrix'> + Creating a view on a structured array so it can be used in calculations + + >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) + >>> xv = x.view(dtype=np.int8).reshape(-1,2) + >>> xv + array([[1, 2], + [3, 4]], dtype=int8) + >>> xv.mean(0) + array([ 2., 3.]) + + Making changes to the view changes the underlying array + + >>> xv[0,1] = 20 + >>> print x + [(1, 20) (3, 4)] + Using a view to convert an array to a record array: >>> z = x.view(np.recarray) @@ -2704,9 +2790,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', add_newdoc('numpy.core.umath', 'frexp', """ - Return normalized fraction and exponent of 2, element-wise of input array. + Return normalized fraction and exponent of 2 of input array, element-wise. - Returns (`out1`, `out2`) from equation `x` = `out1` * ( 2 ** `out2` ) + Returns (`out1`, `out2`) from equation ``x` = out1 * 2**out2``. Parameters ---------- @@ -2715,19 +2801,29 @@ add_newdoc('numpy.core.umath', 'frexp', Returns ------- - (out1, out2) : tuple of ndarray, (float, int) - The `out1` ndarray is a float array with numbers between -1 and 1. - The `out2` array is an int array represent the exponent of 2. + (out1, out2) : tuple of ndarrays, (float, int) + `out1` is a float array with values between -1 and 1. + `out2` is an int array which represent the exponent of 2. + + See Also + -------- + ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`. + + Notes + ----- + Complex dtypes are not supported, they will raise a TypeError. Examples -------- - >>> y1,y2 = np.frexp([3.4, 5.7, 1, 10, -100, 0]) + >>> x = np.arange(9) + >>> y1, y2 = np.frexp(x) >>> y1 - array([ 0.85 , 0.7125 , 0.5 , 0.625 , -0.78125, 0. ]) + array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875, + 0.5 ]) >>> y2 - array([2, 3, 1, 4, 7, 0], dtype=int32) + array([0, 1, 2, 2, 3, 3, 3, 3, 4]) >>> y1 * 2**y2 - array([ 3.4, 5.7, 1. , 10. , -100. , 0. ]) + array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.]) """) @@ -2764,31 +2860,97 @@ add_newdoc('numpy.core.umath', 'ldexp', Parameters ---------- x1 : array_like - The significand. + The array of multipliers. x2 : array_like - The exponent. + The array of exponents. Returns ------- y : array_like - y = x1 * 2**x2 + The output array, the result of ``x1 * 2**x2``. + + See Also + -------- + frexp : Return (y1, y2) from ``x = y1 * 2**y2``, the inverse of `ldexp`. + + Notes + ----- + Complex dtypes are not supported, they will raise a TypeError. 
+
+    `ldexp` is useful as the inverse of `frexp`; if used by itself it is
+    clearer to simply use the expression ``x1 * 2**x2``.

    Examples
    --------
-    >>> np.ldexp(5., 2)
-    20.
+    >>> np.ldexp(5, np.arange(4))
+    array([  5.,  10.,  20.,  40.], dtype=float32)
+
+    >>> x = np.arange(6)
+    >>> np.ldexp(*np.frexp(x))
+    array([ 0.,  1.,  2.,  3.,  4.,  5.])

    """)

-add_newdoc('numpy.core.umath','geterrobj',
-    """geterrobj()
+add_newdoc('numpy.core.umath', 'geterrobj',
+    """
+    geterrobj()

-    Used internally by `geterr`.
+    Return the current object that defines floating-point error handling.
+
+    The error object contains all information that defines the error handling
+    behavior in Numpy. `geterrobj` is used internally by the other
+    functions that get and set error handling behavior (`geterr`, `seterr`,
+    `geterrcall`, `seterrcall`).

    Returns
    -------
    errobj : list
-        Internal numpy buffer size, error mask, error callback function.
+        The error object, a list containing three elements:
+        [internal numpy buffer size, error mask, error callback function].
+
+        The error mask is a single integer that holds the treatment information
+        on all four floating point errors. If we print it in base 8, we can see
+        what treatment is set for "invalid", "under", "over", and "divide" (in
+        that order). The printed string can be interpreted with
+
+        * 0 : 'ignore'
+        * 1 : 'warn'
+        * 2 : 'raise'
+        * 3 : 'call'
+        * 4 : 'print'
+        * 5 : 'log'
+
+    See Also
+    --------
+    seterrobj, seterr, geterr, seterrcall, geterrcall
+    getbufsize, setbufsize
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterrobj()  # first get the defaults
+    [10000, 0, None]
+
+    >>> def err_handler(type, flag):
+    ...     print "Floating point error (%s), with flag %s" % (type, flag)
+    ...
+    >>> old_bufsize = np.setbufsize(20000)
+    >>> old_err = np.seterr(divide='raise')
+    >>> old_handler = np.seterrcall(err_handler)
+    >>> np.geterrobj()
+    [20000, 2, <function err_handler at 0x91dcaac>]
+
+    >>> old_err = np.seterr(all='ignore')
+    >>> np.base_repr(np.geterrobj()[1], 8)
+    '0'
+    >>> old_err = np.seterr(divide='warn', over='log', under='call',
+                            invalid='print')
+    >>> np.base_repr(np.geterrobj()[1], 8)
+    '4351'

    """)

@@ -2796,16 +2958,57 @@ add_newdoc('numpy.core.umath', 'seterrobj',
    """
    seterrobj(errobj)

-    Used internally by `seterr`.
+    Set the object that defines floating-point error handling.
+
+    The error object contains all information that defines the error handling
+    behavior in Numpy. `seterrobj` is used internally by the other
+    functions that set error handling behavior (`seterr`, `seterrcall`).

    Parameters
    ----------
    errobj : list
-        [buffer_size, error_mask, callback_func]
+        The error object, a list containing three elements:
+        [internal numpy buffer size, error mask, error callback function].
+
+        The error mask is a single integer that holds the treatment information
+        on all four floating point errors. If we print it in base 8, we can see
+        what treatment is set for "invalid", "under", "over", and "divide" (in
+        that order). The printed string can be interpreted with
+
+        * 0 : 'ignore'
+        * 1 : 'warn'
+        * 2 : 'raise'
+        * 3 : 'call'
+        * 4 : 'print'
+        * 5 : 'log'

    See Also
    --------
-    seterrcall
+    geterrobj, seterr, geterr, seterrcall, geterrcall
+    getbufsize, setbufsize
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> old_errobj = np.geterrobj()  # first get the defaults
+    >>> old_errobj
+    [10000, 0, None]
+
+    >>> def err_handler(type, flag):
+    ...     print "Floating point error (%s), with flag %s" % (type, flag)
+    ...
+    >>> new_errobj = [20000, 12, err_handler]
+    >>> np.seterrobj(new_errobj)
+    >>> np.base_repr(12, 8)  # int for divide=4 ('print') and over=1 ('warn')
+    '14'
+    >>> np.geterr()
+    {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+    >>> np.geterrcall()
+    <function err_handler at 0xb75e9304>

    """)

@@ -2822,32 +3025,49 @@ add_newdoc('numpy.lib._compiled_base', 'digitize',

    Return the indices of the bins to which each value in input array belongs.

-    Each index returned is such that `bins[i-1]` <= `x` < `bins[i]` if `bins`
-    is monotonically increasing, or `bins[i-1]` > `x` >= `bins[i]` if `bins`
-    is monotonically decreasing. Beyond the bounds of `bins`, 0 or len(`bins`)
-    is returned as appropriate.
+    Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
+    `bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
+    `bins` is monotonically decreasing. If values in `x` are beyond the
+    bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate.

    Parameters
    ----------
    x : array_like
-        Input array to be binned.
+        Input array to be binned. It has to be 1-dimensional.
    bins : array_like
-        Array of bins.
+        Array of bins. It has to be 1-dimensional and monotonic.

    Returns
    -------
-    out : ndarray
-        Output array of indices of same shape as `x`.
+    out : ndarray of ints
+        Output array of indices, of same shape as `x`.
+
+    Raises
+    ------
+    ValueError
+        If the input is not 1-dimensional, or if `bins` is not monotonic.
+    TypeError
+        If the type of the input is complex.
+
+    See Also
+    --------
+    bincount, histogram, unique
+
+    Notes
+    -----
+    If values in `x` are such that they fall outside the bin range,
+    attempting to index `bins` with the indices that `digitize` returns
+    will result in an IndexError.

    Examples
    --------
    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
-    >>> d = np.digitize(x,bins)
-    >>> d
+    >>> inds = np.digitize(x, bins)
+    >>> inds
    array([1, 4, 3, 2])
-    >>> for n in range(len(x)):
-    ...   print bins[d[n]-1], "<=", x[n], "<", bins[d[n]]
+    >>> for n in range(x.size):
+    ...   print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
    ...
    0.0 <= 0.2 < 1.0
    4.0 <= 6.4 < 10.0

@@ -2860,24 +3080,54 @@ add_newdoc('numpy.lib._compiled_base', 'bincount',
    """
    bincount(x, weights=None)

-    Return the number of occurrences of each value in array of nonnegative
-    integers.
+    Count number of occurrences of each value in array of non-negative ints.

-    The output, b[i], represents the number of times that i is found in `x`.
-    If `weights` is specified, every occurrence of i at a position p
-    contributes `weights` [p] instead of 1.
+    The number of bins (of size 1) is one larger than the largest value in
+    `x`. Each bin gives the number of occurrences of its index value in `x`.
+    If `weights` is specified the input array is weighted by it, i.e. if a
+    value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+    of ``out[n] += 1``.

    Parameters
    ----------
-    x : array_like, 1 dimension, nonnegative integers
-        Input array.
-    weights : array_like, same shape as `x`, optional
-        Weights.
+    x : array_like, 1 dimension, nonnegative ints
+        Input array.
+    weights : array_like, optional
+        Weights, array of the same shape as `x`.
+ + Returns + ------- + out : ndarray of ints + The result of binning the input array. + + Raises + ------ + ValueError + If the input is not 1-dimensional, or contains elements with negative + values. + TypeError + If the type of the input is float or complex. See Also -------- histogram, digitize, unique + Examples + -------- + >>> np.bincount(np.arange(5)) + array([1, 1, 1, 1, 1]) + >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) + array([1, 3, 1, 1, 0, 0, 0, 1]) + + >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) + >>> np.bincount(x).size == np.amax(x)+1 + True + + >>> np.bincount(np.arange(5, dtype=np.float)) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + TypeError: array cannot be safely cast to required type + """) add_newdoc('numpy.lib._compiled_base', 'add_docstring', @@ -3440,6 +3690,8 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', """ Integer indicating how this dtype relates to the built-in dtypes. + Read-only. + = ======================================================================== 0 if this is a structured array type, with fields 1 if this is a dtype compiled into numpy (such as ints, floats etc) @@ -3642,7 +3894,7 @@ add_newdoc('numpy.lib.index_tricks', 'mgrid', Examples -------- - >>> mgrid[0:5,0:5] + >>> np.mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], @@ -3653,7 +3905,7 @@ add_newdoc('numpy.lib.index_tricks', 'mgrid', [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) - >>> mgrid[-1:1:5j] + >>> np.mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) """) @@ -3697,7 +3949,7 @@ add_newdoc('numpy.lib.index_tricks', 'ogrid', """) - + ############################################################################## # # Documentation for `generic` attributes and methods @@ -3709,7 +3961,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', """) # Attributes - + add_newdoc('numpy.core.numerictypes', 'generic', ('T', """ """)) @@ -4000,7 +4252,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('var', add_newdoc('numpy.core.numerictypes', 'generic', ('view', """ """)) - + ############################################################################## # diff --git a/numpy/core/SConscript b/numpy/core/SConscript index f09b17618..fffbee5af 100644 --- a/numpy/core/SConscript +++ b/numpy/core/SConscript @@ -120,10 +120,9 @@ numpyconfig_sym.append(('NPY_NO_SMP', nosmp)) # Check whether we can use C99 printing formats #---------------------------------------------- if config.CheckDeclaration(('PRIdPTR'), includes = '#include <inttypes.h>'): - usec99 = 1 + numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', '#define NPY_USE_C99_FORMATS 1')) else: - usec99 = 0 -numpyconfig_sym.append(('USE_C99_FORMATS', usec99)) + numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', '')) #---------------------- # Checking the mathlib @@ -187,9 +186,10 @@ for f in ["isnan", "isinf", "signbit", "isfinite"]: """ st = config.CheckDeclaration(f, includes=includes) if st: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), - '#define NPY_HAVE_DECL_%s' % f.upper())) - + numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), + '#define NPY_HAVE_DECL_%s' % f.upper())) + else: + numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), '')) inline = config.CheckInline() config.Define('inline', inline) @@ -198,7 +198,9 @@ numpyconfig_sym.append(('NPY_INLINE', inline)) if ENABLE_SEPARATE_COMPILATION: config.Define("ENABLE_SEPARATE_COMPILATION", 1) - numpyconfig_sym.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1)) + 
numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1')) +else: + numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '')) # Checking for visibility macro def visibility_define(): @@ -262,6 +264,10 @@ write_info(env) # Build #========== +# List of headers which need to be "installed " into the build directory for +# proper in-place build support +generated_headers = [] + #--------------------------------------- # Generate the public configuration file #--------------------------------------- @@ -272,7 +278,9 @@ for key, value in numpyconfig_sym: env['SUBST_DICT'] = config_dict include_dir = 'include/numpy' -env.SubstInFile(pjoin(include_dir, 'numpyconfig.h'), pjoin(include_dir, 'numpyconfig.h.in')) +target = env.SubstInFile(pjoin(include_dir, 'numpyconfig.h'), + pjoin(include_dir, 'numpyconfig.h.in')) +generated_headers.append(target[0]) env['CONFIG_H_GEN'] = numpyconfig_sym @@ -298,26 +306,45 @@ umathmodule_src = env.GenerateFromTemplate(pjoin('src', 'umath', 'umathmodule.c.src')) umath_tests_src = env.GenerateFromTemplate(pjoin('src', 'umath', 'umath_tests.c.src')) +multiarray_tests_src = env.GenerateFromTemplate(pjoin('src', 'multiarray', + 'multiarray_tests.c.src')) scalarmathmodule_src = env.GenerateFromTemplate( pjoin('src', 'scalarmathmodule.c.src')) umath = env.GenerateUmath('__umath_generated', pjoin('code_generators', 'generate_umath.py')) -multiarray_api = env.GenerateMultiarrayApi('multiarray_api', +multiarray_api = env.GenerateMultiarrayApi('include/numpy/multiarray_api', [ pjoin('code_generators', 'numpy_api_order.txt')]) +generated_headers.append(multiarray_api[0]) -ufunc_api = env.GenerateUfuncApi('ufunc_api', +ufunc_api = env.GenerateUfuncApi('include/numpy/ufunc_api', pjoin('code_generators', 'ufunc_api_order.txt')) +generated_headers.append(ufunc_api[0]) -env.Prepend(CPPPATH = ['include', '.']) +# include/numpy is added for compatibility reasons with distutils: this is +# needed for __multiarray_api.c and __ufunc_api.c included from multiarray and +# ufunc. 
+env.Prepend(CPPPATH = ['include', '.', 'include/numpy']) # npymath core lib -npymath_src = env.GenerateFromTemplate(pjoin('src', 'npy_math.c.src')) -env.DistutilsStaticExtLibrary("npymath", npymath_src) +npymath_src = env.GenerateFromTemplate(pjoin('src', 'npymath', 'npy_math.c.src')) +env.DistutilsInstalledStaticExtLibrary("npymath", npymath_src, install_dir='lib') env.Prepend(LIBS=["npymath"]) env.Prepend(LIBPATH=["."]) +subst_dict = {'@prefix@': '$distutils_install_prefix', + '@sep@': repr(os.path.sep)} +npymath_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'npymath.ini'), + 'npymath.ini.in', SUBST_DICT=subst_dict) + +subst_dict = {'@posix_mathlib@': " ".join(['-l%s' % l for l in mlib]), + '@msvc_mathlib@': " ".join(['%s.mlib' % l for l in mlib])} +mlib_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'mlib.ini'), + 'mlib.ini.in', SUBST_DICT=subst_dict) +env.Install('$distutils_installdir/lib/npy-pkg-config', mlib_ini) +env.Install('$distutils_installdir/lib/npy-pkg-config', npymath_ini) + #----------------- # Build multiarray #----------------- @@ -353,6 +380,7 @@ if ENABLE_SEPARATE_COMPILATION: else: multiarray_src = [pjoin('src', 'multiarray', 'multiarraymodule_onefile.c')] multiarray = env.DistutilsPythonExtension('multiarray', source = multiarray_src) +env.DistutilsPythonExtension('multiarray_tests', source=multiarray_tests_src) #------------------ # Build sort module @@ -395,3 +423,7 @@ if build_blasdot: dotblas_o = env.PythonObject('_dotblas', source = dotblas_src) env.Depends(dotblas_o, pjoin("blasdot", "cblas.h")) dotblas = env.DistutilsPythonExtension('_dotblas', dotblas_o) + +# "Install" the header in the build directory, so that in-place build works +for h in generated_headers: + env.Install(pjoin('$distutils_installdir', include_dir), h) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 6d3c52990..c8bc9438a 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -56,10 +56,14 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, suppress : bool, optional Whether or not suppress printing of small floating point values using scientific notation (default False). - nanstr : string, optional - String representation of floating point not-a-number (default nan). - infstr : string, optional - String representation of floating point infinity (default inf). + nanstr : str, optional + String representation of floating point not-a-number (default NaN). + infstr : str, optional + String representation of floating point infinity (default Inf). + + See Also + -------- + get_printoptions, set_string_function Examples -------- @@ -79,12 +83,9 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, >>> eps = np.finfo(float).eps >>> x = np.arange(4.) 
- >>> x**2 - (x + eps)**2 array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 array([-0., -0., 0., 0.]) diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index 509048471..69f8c2026 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -28,6 +28,7 @@ extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; #else @@ -39,6 +40,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; +NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; #endif @@ -113,13 +115,13 @@ _import_array(void) PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); return -1; } -#ifdef NPY_BIG_ENDIAN +#if NPY_BYTE_ORDER ==NPY_BIG_ENDIAN if (st != NPY_CPU_BIG) { PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ "big endian, but detected different endianness at runtime"); return -1; } -#elif defined(NPY_LITTLE_ENDIAN) +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN if (st != NPY_CPU_LITTLE) { PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ "little endian, but detected different endianness at runtime"); diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 15e427f29..60a8b5a30 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -650,6 +650,11 @@ defdict = { docstrings.get('numpy.core.umath.signbit'), TD(flts, out='?'), ), +'copysign' : + Ufunc(2, 1, None, + docstrings.get('numpy.core.umath.copysign'), + TD(flts), + ), 'modf' : Ufunc(1, 2, None, docstrings.get('numpy.core.umath.modf'), diff --git a/numpy/core/code_generators/numpy_api_order.txt b/numpy/core/code_generators/numpy_api_order.txt index 6b7272459..72f8d5c82 100644 --- a/numpy/core/code_generators/numpy_api_order.txt +++ b/numpy/core/code_generators/numpy_api_order.txt @@ -173,9 +173,10 @@ PyArray_CompareString PyArray_MultiIterFromObjects PyArray_GetEndianness PyArray_GetNDArrayCFeatureVersion -PyArray_Acorrelate +PyArray_Correlate2 +PyArray_NeighborhoodIterNew PyArray_SetDatetimeParseFunction PyArray_DatetimeToDatetimeStruct PyArray_TimedeltaToTimedeltaStruct PyArray_DatetimeStructToDatetime -PyArray_TimedeltaStructToTimedelta
\ No newline at end of file +PyArray_TimedeltaStructToTimedelta

diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index e880f21a2..7744bb4bf 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -93,6 +93,9 @@ add_newdoc('numpy.core.umath', 'arccos',
        `x`-coordinate on the unit circle.
        For real arguments, the domain is [-1, 1].

+    out : ndarray, optional
+        Array to store results in.
+
    Returns
    -------
    angle : ndarray

@@ -153,6 +156,8 @@ add_newdoc('numpy.core.umath', 'arccosh',
    ----------
    x : array_like
        Input array.
+    out : ndarray, optional
+        Array of the same shape as `x`.

    Returns
    -------

@@ -715,16 +720,44 @@ add_newdoc('numpy.core.umath', 'cos',
    ----------
    x : array_like
        Input array in radians.
+    out : ndarray, optional
+        Output array of same shape as `x`.

    Returns
    -------
-    out : ndarray
-        Output array of same shape as `x`.
+    y : ndarray
+        The corresponding cosine values.
+
+    Raises
+    ------
+    ValueError: invalid return array shape
+        if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+    Notes
+    -----
+    If `out` is provided, the function writes the result into it,
+    and returns a reference to `out`. (See Examples)
+
+    References
+    ----------
+    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+    New York, NY: Dover, 1972.

    Examples
    --------
    >>> np.cos(np.array([0, np.pi/2, np.pi]))
    array([  1.00000000e+00,   6.12303177e-17,  -1.00000000e+00])
+    >>>
+    >>> # Example of providing the optional output parameter
+    >>> out1 = np.empty(1)
+    >>> out2 = np.cos([0.1], out1)
+    >>> out2 is out1
+    True
+    >>>
+    >>> # Example of ValueError due to provision of shape mis-matched `out`
+    >>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: invalid return array shape

    """)

@@ -903,7 +936,7 @@ add_newdoc('numpy.core.umath', 'equal',

add_newdoc('numpy.core.umath', 'exp',
    """
-    Calculate the exponential of the elements in the input array.
+    Calculate the exponential of all elements in the input array.

    Parameters
    ----------
@@ -913,7 +946,12 @@ add_newdoc('numpy.core.umath', 'exp',
    Returns
    -------
    out : ndarray
-        Element-wise exponential of `x`.
+        Output array, element-wise exponential of `x`.
+
+    See Also
+    --------
+    expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
+    exp2 : Calculate ``2**x`` for all elements in the array.

    Notes
    -----

@@ -968,20 +1006,34 @@ add_newdoc('numpy.core.umath', 'exp2',
    x : array_like
        Input values.

+    out : ndarray, optional
+        Array to insert results into.
+
    Returns
    -------
    out : ndarray
        Element-wise 2 to the power `x`.

+    See Also
+    --------
+    exp : Calculate the exponential of all elements in the array.
+
    Notes
    -----
    .. versionadded:: 1.3.0
+
+
+    Examples
+    --------
+    >>> np.exp2([2, 3])
+    array([ 4.,  8.])
+
    """)

add_newdoc('numpy.core.umath', 'expm1',
    """
-    Compute ``exp(x) - 1`` for all elements in the array.
+    Calculate ``exp(x) - 1`` for all elements in the array.

    Parameters
    ----------

@@ -1621,7 +1673,7 @@ add_newdoc('numpy.core.umath', 'log',

add_newdoc('numpy.core.umath', 'log10',
    """
-    Compute the logarithm in base 10 element-wise.
+    Return the base 10 logarithm of the input array, element-wise.

    Parameters
    ----------
@@ -1631,7 +1683,8 @@ add_newdoc('numpy.core.umath', 'log10',
    Returns
    -------
    y : ndarray
-        Base-10 logarithm of `x`.
+        The logarithm to the base 10 of `x`, element-wise. NaNs are
+        returned where x is negative.
Notes ----- @@ -1656,7 +1709,7 @@ add_newdoc('numpy.core.umath', 'log10', Examples -------- - >>> np.log10([1.e-15,-3.]) + >>> np.log10([1e-15, -3.]) array([-15., NaN]) """) @@ -1687,71 +1740,91 @@ add_newdoc('numpy.core.umath', 'log2', add_newdoc('numpy.core.umath', 'logaddexp', """ - Logarithm of `exp(x) + exp(y)`. + Logarithm of the sum of exponentiations of the inputs. - This function is useful in statistics where the calculated probabilities of - events may be so small as to excede the range of normal floating point - numbers. In such cases the logarithm of the calculated probability is - stored. This function allows adding probabilities stored in such a fashion. + Calculates ``log(exp(x1) + exp(x2))``. This function is useful in + statistics where the calculated probabilities of events may be so small + as to exceed the range of normal floating point numbers. In such cases + the logarithm of the calculated probability is stored. This function + allows adding probabilities stored in such a fashion. Parameters ---------- - x : array_like - Input values. - y : array_like + x1, x2 : array_like Input values. - Returns ------- result : ndarray - Logarithm of `exp(x) + exp(y)`. + Logarithm of ``exp(x1) + exp(x2)``. See Also -------- - logaddexp2 + logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2. Notes ----- .. versionadded:: 1.3.0 + Examples + -------- + >>> prob1 = np.log(1e-50) + >>> prob2 = np.log(2.5e-50) + >>> prob12 = np.logaddexp(prob1, prob2) + >>> prob12 + -113.87649168120691 + >>> np.exp(prob12) + 3.5000000000000057e-50 + """) add_newdoc('numpy.core.umath', 'logaddexp2', """ - Base-2 Logarithm of `2**x + 2**y`. + Logarithm of the sum of exponentiations of the inputs in base-2. - This function is useful in machine learning when the calculated probabilities of - events may be so small as to excede the range of normal floating point - numbers. In such cases the base-2 logarithm of the calculated probability - can be used instead. This function allows adding probabilities stored in such a fashion. + Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine + learning when the calculated probabilities of events may be so small + as to exceed the range of normal floating point numbers. In such cases + the base-2 logarithm of the calculated probability can be used instead. + This function allows adding probabilities stored in such a fashion. Parameters ---------- - x : array_like - Input values. - y : array_like + x1, x2 : array_like Input values. - + out : ndarray, optional + Array to store results in. Returns ------- result : ndarray - Base-2 logarithm of `2**x + 2**y`. + Base-2 logarithm of ``2**x1 + 2**x2``. See Also -------- - logaddexp + logaddexp: Logarithm of the sum of exponentiations of the inputs. Notes ----- .. versionadded:: 1.3.0 + Examples + -------- + >>> prob1 = np.log2(1e-50) + >>> prob2 = np.log2(2.5e-50) + >>> prob12 = np.logaddexp2(prob1, prob2) + >>> prob1, prob2, prob12 + (-166.09640474436813, -164.77447664948076, -164.28904982231052) + >>> 2**prob12 + 3.4999999999999914e-50 + """) add_newdoc('numpy.core.umath', 'log1p', """ - `log(1 + x)` in base `e`, elementwise. + Return the natural logarithm of one plus the input array, element-wise. + + Calculates ``log(1 + x)``. Parameters ---------- @@ -1761,7 +1834,11 @@ add_newdoc('numpy.core.umath', 'log1p', Returns ------- y : ndarray - Natural logarithm of `1 + x`, elementwise. + Natural logarithm of `1 + x`, element-wise. 
+
+    See Also
+    --------
+    expm1 : ``exp(x) - 1``, the inverse of `log1p`.

    Notes
    -----

@@ -2022,8 +2099,6 @@ add_newdoc('numpy.core.umath', 'minimum',

add_newdoc('numpy.core.umath', 'fmax',
    """
-    fmax(x1, x2[, out])
-
    Element-wise maximum of array elements.

    Compare two arrays and returns a new array containing the element-wise

@@ -2132,7 +2207,7 @@ add_newdoc('numpy.core.umath', 'fmin',

add_newdoc('numpy.core.umath', 'modf',
    """
-    Return the fractional and integral part of a number.
+    Return the fractional and integral parts of an array, element-wise.

    The fractional and integral parts are negative if the given number is
    negative.

@@ -2140,7 +2215,7 @@ add_newdoc('numpy.core.umath', 'modf',
    Parameters
    ----------
    x : array_like
-        Input number.
+        Input array.

    Returns
    -------

@@ -2149,33 +2224,37 @@ add_newdoc('numpy.core.umath', 'modf',
    y2 : ndarray
        Integral part of `x`.

+    Notes
+    -----
+    For integer input the return values are floats.
+
    Examples
    --------
-    >>> np.modf(2.5)
-    (0.5, 2.0)
-    >>> np.modf(-.4)
-    (-0.40000000000000002, -0.0)
+    >>> np.modf([0, 3.5])
+    (array([ 0. ,  0.5]), array([ 0.,  3.]))
+    >>> np.modf(-0.5)
+    (-0.5, -0.0)

    """)

add_newdoc('numpy.core.umath', 'multiply',
    """
-    Multiply arguments elementwise.
+    Multiply arguments element-wise.

    Parameters
    ----------
    x1, x2 : array_like
-        The arrays to be multiplied.
+        Input arrays to be multiplied.

    Returns
    -------
    y : ndarray
-        The product of `x1` and `x2`, elementwise. Returns a scalar if
+        The product of `x1` and `x2`, element-wise. Returns a scalar if
        both `x1` and `x2` are scalars.

    Notes
    -----
-    Equivalent to `x1` * `x2` in terms of array-broadcasting.
+    Equivalent to `x1` * `x2` in terms of array broadcasting.

    Examples
    --------

@@ -2353,23 +2432,34 @@ add_newdoc('numpy.core.umath', 'deg2rad',

add_newdoc('numpy.core.umath', 'reciprocal',
    """
-    Return element-wise reciprocal.
+    Return the reciprocal of the argument, element-wise.
+
+    Calculates ``1/x``.

    Parameters
    ----------
    x : array_like
-        Input value.
+        Input array.

    Returns
    -------
    y : ndarray
-        Return value.
+        Return array.
+
+    Notes
+    -----
+    .. note::
+        This function is not designed to work with integers.
+
+    For integer arguments with absolute value larger than 1 the result is
+    always zero because of the way Python handles integer division.
+    For integer zero the result is an overflow.

    Examples
    --------
-    >>> reciprocal(2.)
+    >>> np.reciprocal(2.)
    0.5
-    >>> reciprocal([1, 2., 3.33])
+    >>> np.reciprocal([1, 2., 3.33])
    array([ 1.       ,  0.5      ,  0.3003003])

    """)

@@ -2378,7 +2468,7 @@ add_newdoc('numpy.core.umath', 'remainder',
    """
    Returns element-wise remainder of division.

-    Computes `x1 - floor(x1/x2)*x2`.
+    Computes ``x1 - floor(x1/x2)*x2``.

    Parameters
    ----------

@@ -2390,22 +2480,23 @@ add_newdoc('numpy.core.umath', 'remainder',
    Returns
    -------
    y : ndarray
-        The remainder of the quotient `x1/x2`, element-wise. Returns a scalar
+        The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
        if both `x1` and `x2` are scalars.

    See Also
    --------
-    divide
-    floor
+    divide, floor

    Notes
    -----
-    Returns 0 when `x2` is 0.
+    Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.

    Examples
    --------
-    >>> np.remainder([4,7],[2,3])
+    >>> np.remainder([4,7], [2,3])
    array([0, 1])
+    >>> np.remainder(np.arange(7), 5)
+    array([0, 1, 2, 3, 4, 0, 1])

    """)

@@ -2523,6 +2614,33 @@ add_newdoc('numpy.core.umath', 'signbit',

    """)

+add_newdoc('numpy.core.umath', 'copysign',
+    """
+    Change the sign of `x` to that of `y`, element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        Values to change the sign of.
+    y : array_like
+        The sign of `y` is copied to `x`.
+
+    Returns
+    -------
+    out : array_like
+        Values of `x` with the sign of `y`.
+
+    Examples
+    --------
+    >>> np.copysign(1.3, -1)
+    -1.3
+    >>> 1/np.copysign(0, 1)
+    inf
+    >>> 1/np.copysign(0, -1)
+    -inf
+
+    """)
+
add_newdoc('numpy.core.umath', 'sin',
    """
    Trigonometric sine, element-wise.

@@ -2590,11 +2708,50 @@ add_newdoc('numpy.core.umath', 'sinh',
    ----------
    x : array_like
        Input array.
+    out : ndarray, optional
+        Output array of same shape as `x`.

    Returns
    -------
-    out : ndarray
-        Output array of same shape as `x`.
+    y : ndarray
+        The corresponding hyperbolic sine values.
+
+    Raises
+    ------
+    ValueError: invalid return array shape
+        if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+    Notes
+    -----
+    If `out` is provided, the function writes the result into it,
+    and returns a reference to `out`. (See Examples)
+
+    References
+    ----------
+    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+    New York, NY: Dover, 1972, pg. 83.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.sinh(0)
+    0.0
+    >>> np.sinh(np.pi*1j/2)
+    1j
+    >>> np.sinh(np.pi*1j)
+    1.2246063538223773e-16j
+    >>> # Discrepancy due to vagaries of floating point arithmetic
+    >>> # (the exact value is 0).
+    >>>
+    >>> # Example of providing the optional output parameter
+    >>> out1 = np.empty(1)
+    >>> out2 = np.sinh([0.1], out1)
+    >>> out2 is out1
+    True
+    >>>
+    >>> # Example of ValueError due to provision of shape mis-matched `out`
+    >>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: invalid return array shape

    """)

@@ -2667,13 +2824,12 @@ add_newdoc('numpy.core.umath', 'square',

add_newdoc('numpy.core.umath', 'subtract',
    """
-    Subtract arguments element-wise.
+    Subtract arguments, element-wise.

    Parameters
    ----------
    x1, x2 : array_like
-        The arrays to be subtracted from each other. If type is 'array_like'
-        the `x1` and `x2` shapes must be identical.
+        The arrays to be subtracted from each other.

    Returns
    -------

@@ -2683,7 +2839,7 @@ add_newdoc('numpy.core.umath', 'subtract',
    Notes
    -----
-    Equivalent to `x1` - `x2` in terms of array-broadcasting.
+    Equivalent to ``x1 - x2`` in terms of array broadcasting.

    Examples
    --------

@@ -2703,39 +2859,107 @@ add_newdoc('numpy.core.umath', 'tan',
    """
    Compute tangent element-wise.

+    Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
+
    Parameters
    ----------
    x : array_like
-        Angles in radians.
+        Input array.
+    out : ndarray, optional
+        Output array of same shape as `x`.

    Returns
    -------
    y : ndarray
        The corresponding tangent values.

+    Raises
+    ------
+    ValueError: invalid return array shape
+        if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+    Notes
+    -----
+    If `out` is provided, the function writes the result into it,
+    and returns a reference to `out`. (See Examples)
+
+    References
+    ----------
+    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+    New York, NY: Dover, 1972.
     Examples
     --------
     >>> from math import pi
     >>> np.tan(np.array([-pi,pi/2,pi]))
     array([  1.22460635e-16,   1.63317787e+16,  -1.22460635e-16])
+    >>>
+    >>> # Example of providing the optional output parameter illustrating
+    >>> # that what is returned is a reference to said parameter
+    >>> out2 = np.tan([0.1], out1)
+    >>> out2 is out1
+    True
+    >>>
+    >>> # Example of ValueError due to provision of shape mis-matched `out`
+    >>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: invalid return array shape
 
     """)
 
 add_newdoc('numpy.core.umath', 'tanh',
     """
-    Hyperbolic tangent element-wise.
+    Compute hyperbolic tangent element-wise.
+
+    Equivalent to ``np.sinh(x)/np.cosh(x)`` or
+    ``-1j * np.tan(1j*x)``.
 
     Parameters
     ----------
     x : array_like
         Input array.
+    out : ndarray, optional
+        Output array of same shape as `x`.
 
     Returns
     -------
     y : ndarray
         The corresponding hyperbolic tangent values.
 
+    Raises
+    ------
+    ValueError: invalid return array shape
+        if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+    Notes
+    -----
+    If `out` is provided, the function writes the result into it,
+    and returns a reference to `out`.  (See Examples)
+
+    References
+    ----------
+    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+    New York, NY: Dover, 1972, pg. 83.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.tanh((0, np.pi*1j, np.pi*1j/2))
+    array([ 0. +0.00000000e+00j,  0. -1.22460635e-16j,  0. +1.63317787e+16j])
+    >>>
+    >>> # Example of providing the optional output parameter illustrating
+    >>> # that what is returned is a reference to said parameter
+    >>> out2 = np.tanh([0.1], out1)
+    >>> out2 is out1
+    True
+    >>>
+    >>> # Example of ValueError due to provision of shape mis-matched `out`
+    >>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: invalid return array shape
+
     """)
 
 add_newdoc('numpy.core.umath', 'true_divide',
diff --git a/numpy/core/defmatrix.py b/numpy/core/defmatrix.py
index d1636e8b5..354e40060 100644
--- a/numpy/core/defmatrix.py
+++ b/numpy/core/defmatrix.py
@@ -2,7 +2,7 @@ __all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
 
 import sys
 import numeric as N
-from numeric import concatenate, isscalar, binary_repr, identity
+from numeric import concatenate, isscalar, binary_repr, identity, asanyarray
 from numerictypes import issubdtype
 
 # make translation table
@@ -115,6 +115,7 @@ def matrix_power(M,n):
            [ 0, -1]])
 
     """
+    M = asanyarray(M)
     if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
         raise ValueError("input must be a square array")
     if not issubdtype(type(n),int):
@@ -490,6 +491,26 @@ class matrix(N.ndarray):
         return N.ndarray.prod(self, axis, dtype, out)._align(axis)
 
     def any(self, axis=None, out=None):
+        """
+        Test whether any array element along a given axis evaluates to True.
+
+        Refer to `numpy.any` for full documentation.
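Because the method defers to `numpy.any` and then re-aligns the result, the
matrix subclass keeps its 2-D shape on axis reductions; a short sketch of the
behaviour being documented here (plain NumPy)::

    import numpy as np

    m = np.matrix([[0, 0], [1, 0]])

    m.any()           # True: at least one element is nonzero
    m.any(axis=0)     # matrix([[ True, False]]), OR down each column
    m.any(axis=1)     # matrix([[False], [ True]]), OR across each row, still 2-D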
+ + Parameters + ---------- + axis: int, optional + Axis along which logical OR is performed + out: ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ return N.ndarray.any(self, axis, out)._align(axis) def all(self, axis=None, out=None): diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 99b837ba2..f7f584d3d 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -256,15 +256,14 @@ def repeat(a, repeats, axis=None): def put(a, ind, v, mode='raise'): """ - Changes specific elements of one array by replacing from another array. + Replaces specified elements of an array with given values. - The indexing works on the flattened target array, `put` is roughly + The indexing works on the flattened target array. `put` is roughly equivalent to: :: - for i, val in zip(ind, v): - x.flat[i] = val + a.flat[ind] = v Parameters ---------- @@ -292,14 +291,14 @@ def put(a, ind, v, mode='raise'): Examples -------- - >>> x = np.arange(5) - >>> np.put(x, [0, 2], [-44, -55]) - >>> x + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a array([-44, 1, -55, 3, 4]) - >>> x = np.arange(5) - >>> np.put(x, 22, -5, mode='clip') - >>> x + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a array([ 0, 1, 2, 3, -5]) """ @@ -450,6 +449,22 @@ def sort(a, axis=-1, kind='quicksort', order=None): the last axis is faster and uses less space than sorting along any other axis. + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. + + Previous to numpy 1.4.0 sorting real and complex arrays containing nan + values led to undefined behaviour. In numpy versions >= 1.4.0 nan + values are sorted to the end. The extended sort order is: + + Real: [R, nan] + Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] + + where R is a non-nan real value. Complex values with the same nan + placements are sorted according to the non-nan part if it exists. + Non-nan values are sorted as before. + Examples -------- >>> a = np.array([[1,4],[3,1]]) @@ -529,6 +544,9 @@ def argsort(a, axis=-1, kind='quicksort', order=None): ----- See `sort` for notes on the different sorting algorithms. + As of Numpy 1.4.0 argsort works with real/complex arrays containing + nan values. The enhanced sort order is documented in the numpy.sort. + Examples -------- One dimensional array: @@ -665,6 +683,9 @@ def searchsorted(a, v, side='left'): ----- Binary search is used to find the required insertion points. + As of Numpy 1.4.0 searchsorted works with real/complex arrays containing + nan values. The enhanced sort order is documented in the numpy.sort. + Examples -------- >>> np.searchsorted([1,2,3,4,5], 3) @@ -1086,10 +1107,14 @@ def compress(condition, a, axis=None, out=None): """ Return selected slices of an array along given axis. + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + Parameters ---------- - condition : array_like - Boolean 1-D array selecting which entries to return. 
If len(condition) + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) is less than the size of `a` along the given axis, then output is truncated to the length of the condition array. a : array_like @@ -1109,18 +1134,31 @@ def compress(condition, a, axis=None, out=None): See Also -------- - ndarray.compress: Equivalent method. + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method. Examples -------- - >>> a = np.array([[1, 2], [3, 4]]) + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) >>> np.compress([0, 1], a, axis=0) array([[3, 4]]) - >>> np.compress([1], a, axis=1) - array([[1], - [3]]) - >>> np.compress([0,1,1], a) - array([2, 3]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) """ try: @@ -1306,6 +1344,8 @@ def any(a,axis=None, out=None): """ Test whether any array element along a given axis evaluates to True. + Returns single boolean unless `axis` is not ``None`` + Parameters ---------- a : array_like @@ -1322,8 +1362,8 @@ def any(a,axis=None, out=None): Returns ------- - any : ndarray, bool - A new boolean or array is returned unless `out` is + any : bool, ndarray + A new boolean or `ndarray` is returned unless `out` is specified, in which case a reference to `out` is returned. See Also @@ -1429,12 +1469,10 @@ def cumsum (a, axis=None, dtype=None, out=None): Parameters ---------- a : array_like - Input array or object that can be converted to an array. + Input array. axis : int, optional Axis along which the cumulative sum is computed. The default - (`axis` = `None`) is to compute the cumsum over the flattened - array. `axis` may be negative, in which case it counts from the - last to the first axis. + (None) is to compute the cumsum over the flattened array. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults @@ -1459,11 +1497,12 @@ def cumsum (a, axis=None, dtype=None, out=None): Examples -------- - >>> a = np.array([[1,2,3],[4,5,6]]) + >>> a = np.array([[1,2,3], [4,5,6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a,dtype=float) # specifies type of output value(s) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns array([[1, 2, 3], [5, 7, 9]]) @@ -2122,14 +2161,13 @@ def std(a, axis=None, dtype=None, out=None, ddof=0): Returns ------- - standard_deviation : {ndarray, scalar}; see dtype parameter above. + standard_deviation : ndarray, see dtype parameter above. If `out` is None, return a new array containing the standard deviation, otherwise return a reference to the output array. See Also -------- - numpy.var : Variance - numpy.mean : Average + var, mean Notes ----- @@ -2145,7 +2183,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0): is the square root of the estimated variance, so even with ``ddof=1``, it will not be an unbiased estimate of the standard deviation per se. - Note that, for complex numbers, std takes the absolute + Note that, for complex numbers, `std` takes the absolute value before squaring, so that the result is always real and nonnegative. 
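The ``ddof`` convention spelled out in the Notes can be written as a one-line
identity; a minimal sketch (plain NumPy)::

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, 4.0])

    # ddof=0 (the default): divide by N
    s0 = np.std(a)
    assert np.allclose(s0, np.sqrt(np.mean(np.abs(a - a.mean())**2)))

    # ddof=1: divide by N - 1, the common "sample standard deviation"
    s1 = np.std(a, ddof=1)
    assert np.allclose(s1, np.sqrt(((a - a.mean())**2).sum() / (len(a) - 1)))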
Examples @@ -2153,9 +2191,9 @@ def std(a, axis=None, dtype=None, out=None, ddof=0): >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) 1.1180339887498949 - >>> np.std(a, 0) + >>> np.std(a, axis=0) array([ 1., 1.]) - >>> np.std(a, 1) + >>> np.std(a, axis=1) array([ 0.5, 0.5]) """ diff --git a/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/numpy/core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 000000000..5a73784c1 --- /dev/null +++ b/numpy/core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP +#error You should not include this header directly +#endif +/* + * Private API (here for inline) + */ +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); + +/* + * Update to next item of the iterator + * + * Note: this simply increment the coordinates vector, last dimension + * incremented first , i.e, for dimension 3 + * ... + * -1, -1, -1 + * -1, -1, 0 + * -1, -1, 1 + * .... + * -1, 0, -1 + * -1, 0, 0 + * .... + * 0, -1, -1 + * 0, -1, 0 + * .... + */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + int i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + int wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + int i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h index 8d0e444a2..34b080732 100644 --- a/numpy/core/include/numpy/ndarrayobject.h +++ b/numpy/core/include/numpy/ndarrayobject.h @@ -735,7 +735,18 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); #define NPY_DISABLE_C_API #endif -typedef struct { +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* type of the function which translates a set of coordinates to a pointer to + * the data */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { PyObject_HEAD int nd_m1; /* number of dimensions - 1 */ npy_intp index, size; @@ -747,7 +758,12 @@ typedef struct { PyArrayObject *ao; char *dataptr; /* pointer to current item*/ npy_bool contiguous; -} PyArrayIterObject; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; /* Iterator API */ @@ -971,6 +987,72 @@ typedef struct { } 
PyArrayMapIterObject; +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING, +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* Neighborhood points coordinates are computed relatively to the point pointed + * by _internal_iter */ + PyArrayIterObject* _internal_iter; + /* To keep a reference to the representation of the constant value for + * constant padding */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +// static NPY_INLINE int +// PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); + +/* Include inline implementations - functions defined there are not considered + * public API */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + /* The default array type */ #define NPY_DEFAULT_TYPE NPY_DOUBLE @@ -1112,7 +1194,7 @@ typedef struct { #define NPY_SWAP 's' #define NPY_IGNORE '|' -#ifdef NPY_BIG_ENDIAN +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN #define NPY_NATBYTE NPY_BIG #define NPY_OPPBYTE NPY_LITTLE #else diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h index 106901e55..4d6579189 100644 --- a/numpy/core/include/numpy/npy_cpu.h +++ b/numpy/core/include/numpy/npy_cpu.h @@ -8,7 +8,12 @@ * NPY_CPU_SPARC * NPY_CPU_S390 * NPY_CPU_IA64 - * NPY_CPU_PARISC + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE */ #ifndef _NPY_CPUARCH_H_ #define _NPY_CPUARCH_H_ @@ -42,9 +47,18 @@ #define NPY_CPU_S390 #elif defined(__ia64) #define NPY_CPU_IA64 -#elif defined(__parisc__) - /* XXX: Not sure about this one... 
*/ - #define NPY_CPU_PARISC +#elif defined(__hppa__) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) && defined(__ARMEL__) + #define NPY_CPU_ARMEL +#elif defined(__arm__) && defined(__ARMEB__) + #define NPY_CPU_ARMEB +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE #else #error Unknown CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h index 0a5c05ef9..f3ae4dbff 100644 --- a/numpy/core/include/numpy/npy_endian.h +++ b/numpy/core/include/numpy/npy_endian.h @@ -9,26 +9,32 @@ #ifdef NPY_HAVE_ENDIAN_H /* Use endian.h if available */ #include <endian.h> + #define NPY_BYTE_ORDER __BYTE_ORDER - #if (__BYTE_ORDER == __LITTLE_ENDIAN) - #define NPY_LITTLE_ENDIAN - #elif (__BYTE_ORDER == __BIG_ENDIAN) - #define NPY_BIG_ENDIAN - #else - #error Unknown machine endianness detected. - #endif + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN #else /* Set endianness info using target CPU */ #include "npy_cpu.h" - #if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)\ - || defined(NPY_CPU_IA64) - #define NPY_LITTLE_ENDIAN - #define NPY_BYTE_ORDER 1234 - #elif defined(NPY_CPU_PPC) || defined(NPY_CPU_SPARC)\ - || defined(NPY_CPU_S390) || defined(NPY_CPU_PARISC) || defined(NPY_CPU_PPC64) - #define NPY_BIG_ENDIAN - #define NPY_BYTE_ORDER 4321 + #define NPY_LITTLE_ENDIAN 1234 + #define NPY_BIG_ENDIAN 4321 + + #if defined(NPY_CPU_X86) \ + || defined(NPY_CPU_AMD64) \ + || defined(NPY_CPU_IA64) \ + || defined(NPY_CPU_ALPHA) \ + || defined(NPY_CPU_ARMEL) \ + || defined(NPY_CPU_SH_LE) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ + || defined(NPY_CPU_S390) \ + || defined(NPY_CPU_HPPA) \ + || defined(NPY_CPU_PPC64) \ + || defined(NPY_CPU_ARMEB) \ + || defined(NPY_CPU_SH_BE) + #define NPY_BYTE_ORDER NPY_BIG_ENDIAN #else #error Unknown CPU: can not set endianness #endif diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index 2a8ea182b..c219504e4 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -52,38 +52,47 @@ NPY_INLINE static float __npy_nzerof(void) /* * Useful constants */ -#define NPY_E 2.7182818284590452353602874713526625 /* e */ -#define NPY_LOG2E 1.4426950408889634073599246810018921 /* log_2 e */ -#define NPY_LOG10E 0.4342944819032518276511289189166051 /* log_10 e */ -#define NPY_LOGE2 0.6931471805599453094172321214581766 /* log_e 2 */ -#define NPY_LOGE10 2.3025850929940456840179914546843642 /* log_e 10 */ -#define NPY_PI 3.1415926535897932384626433832795029 /* pi */ -#define NPY_PI_2 1.5707963267948966192313216916397514 /* pi/2 */ -#define NPY_PI_4 0.7853981633974483096156608458198757 /* pi/4 */ -#define NPY_1_PI 0.3183098861837906715377675267450287 /* 1/pi */ -#define NPY_2_PI 0.6366197723675813430755350534900574 /* 2/pi */ - -#define NPY_Ef 2.7182818284590452353602874713526625F /* e */ -#define NPY_LOG2Ef 1.4426950408889634073599246810018921F /* log_2 e */ -#define NPY_LOG10Ef 0.4342944819032518276511289189166051F /* log_10 e */ -#define NPY_LOGE2f 0.6931471805599453094172321214581766F /* log_e 2 */ -#define NPY_LOGE10f 2.3025850929940456840179914546843642F /* log_e 10 */ -#define NPY_PIf 3.1415926535897932384626433832795029F /* pi */ -#define 
NPY_PI_2f 1.5707963267948966192313216916397514F /* pi/2 */ -#define NPY_PI_4f 0.7853981633974483096156608458198757F /* pi/4 */ -#define NPY_1_PIf 0.3183098861837906715377675267450287F /* 1/pi */ -#define NPY_2_PIf 0.6366197723675813430755350534900574F /* 2/pi */ - -#define NPY_El 2.7182818284590452353602874713526625L /* e */ -#define NPY_LOG2El 1.4426950408889634073599246810018921L /* log_2 e */ -#define NPY_LOG10El 0.4342944819032518276511289189166051L /* log_10 e */ -#define NPY_LOGE2l 0.6931471805599453094172321214581766L /* log_e 2 */ -#define NPY_LOGE10l 2.3025850929940456840179914546843642L /* log_e 10 */ -#define NPY_PIl 3.1415926535897932384626433832795029L /* pi */ -#define NPY_PI_2l 1.5707963267948966192313216916397514L /* pi/2 */ -#define NPY_PI_4l 0.7853981633974483096156608458198757L /* pi/4 */ -#define NPY_1_PIl 0.3183098861837906715377675267450287L /* 1/pi */ -#define NPY_2_PIl 0.6366197723675813430755350534900574L /* 2/pi */ +#define NPY_E 2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constan*/ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 
0.577215664901532860606512090082402431L /* Euler constan*/ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ /* * C99 double math funcs @@ -128,6 +137,8 @@ double npy_atan2(double x, double y); double npy_pow(double x, double y); double npy_modf(double x, double* y); +double npy_copysign(double x, double y); + /* * IEEE 754 fpu handling. Those are guaranteed to be macros */ @@ -198,6 +209,8 @@ float npy_fmodf(float x, float y); float npy_modff(float x, float* y); +float npy_copysignf(float x, float y); + /* * float C99 math functions */ @@ -235,6 +248,8 @@ npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); +npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); + /* * Non standard functions */ diff --git a/numpy/core/include/numpy/numpyconfig.h.in b/numpy/core/include/numpy/numpyconfig.h.in index 9c3f40d17..b3c0d851d 100644 --- a/numpy/core/include/numpy/numpyconfig.h.in +++ b/numpy/core/include/numpy/numpyconfig.h.in @@ -21,10 +21,10 @@ @DEFINE_NPY_SIZEOF_PY_LONG_LONG@ #define NPY_INLINE @NPY_INLINE@ -#define NPY_ENABLE_SEPARATE_COMPILATION @NPY_ENABLE_SEPARATE_COMPILATION@ +@DEFINE_NPY_ENABLE_SEPARATE_COMPILATION@ #define NPY_VISIBILITY_HIDDEN @VISIBILITY_HIDDEN@ -#define NPY_USE_C99_FORMATS @USE_C99_FORMATS@ +@DEFINE_NPY_USE_C99_FORMATS@ #define NPY_ABI_VERSION @NPY_ABI_VERSION@ #define NPY_API_VERSION @NPY_API_VERSION@ diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index 65f4938fe..2392c3aa7 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -17,7 +17,7 @@ mode_equivalents = { class memmap(ndarray): """ - Create a memory-map to an array stored in a file on disk. + Create a memory-map to an array stored in a *binary* file on disk. Memory-mapped files are used for accessing small segments of large files on disk, without reading the entire file into memory. Numpy's @@ -58,7 +58,7 @@ class memmap(ndarray): order : {'C', 'F'}, optional Specify the order of the ndarray memory layout: C (row-major) or Fortran (column-major). This only has an effect if the shape is - greater than 1-D. The defaullt order is 'C'. + greater than 1-D. The default order is 'C'. 
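A round trip through a raw binary file shows why the dtype and shape must be
restated when mapping the file back; a small sketch (the file name
``example.dat`` is only a placeholder)::

    import numpy as np

    data = np.arange(12, dtype=np.float32)
    data.tofile('example.dat')          # raw bytes: no header, no metadata

    # the caller must supply dtype and shape; the file itself stores neither
    fp = np.memmap('example.dat', dtype=np.float32, mode='r', shape=(3, 4))
    fp[2, 3]                            # 11.0, read lazily from disk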
     Methods
     ------
 
diff --git a/numpy/core/mlib.ini.in b/numpy/core/mlib.ini.in
new file mode 100644
index 000000000..badaa2ae9
--- /dev/null
+++ b/numpy/core/mlib.ini.in
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=@posix_mathlib@
+Cflags=
+
+[msvc]
+Libs=@msvc_mathlib@
+Cflags=
diff --git a/numpy/core/npymath.ini.in b/numpy/core/npymath.ini.in
new file mode 100644
index 000000000..73379e47c
--- /dev/null
+++ b/numpy/core/npymath.ini.in
@@ -0,0 +1,19 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+prefix=@prefix@
+libdir=${prefix}@sep@lib
+includedir=${prefix}@sep@include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 50e3fd75b..c961bcc0f 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -6,7 +6,7 @@ __all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
            'set_numeric_ops', 'can_cast',
            'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
            'isfortran', 'empty_like', 'zeros_like',
-           'correlate', 'acorrelate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
+           'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
            'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
            'array2string', 'get_printoptions', 'set_printoptions',
            'array_repr', 'array_str', 'set_string_function',
@@ -22,6 +22,7 @@ __all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
            'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS']
 
 import sys
+import warnings
 import multiarray
 import umath
 from umath import *
@@ -362,24 +363,52 @@ def require(a, dtype=None, requirements=None):
     Parameters
     ----------
     a : array_like
-       The object to be converted to a type-and-requirement satisfying array
+        The object to be converted to a type-and-requirement-satisfying array.
     dtype : data-type
-       The required data-type (None is the default data-type -- float64)
-    requirements : list of strings
+        The required data-type; the default data-type is float64.
+    requirements : str or list of str
        The requirements list can be any of the following
 
-       * 'ENSUREARRAY' ('E') - ensure that a base-class ndarray
       * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
       * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
       * 'ALIGNED' ('A') - ensure a data-type aligned array
-       * 'WRITEABLE' ('W') - ensure a writeable array
+       * 'WRITEABLE' ('W') - ensure a writable array
       * 'OWNDATA' ('O') - ensure an array that owns its own data
 
+    See Also
+    --------
+    asarray : Convert input to an ndarray.
+    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    ndarray.flags : Information about the memory layout of the array.
+
     Notes
     -----
     The returned array will be guaranteed to have the listed requirements
     by making a copy if needed.
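A corollary worth keeping in mind: when the input already satisfies every
requirement, `require` hands back the very same object, and a copy is made
only when something is missing. A quick sketch::

    import numpy as np

    x = np.arange(6).reshape(2, 3)

    # already C-contiguous, aligned and writable: no copy is made
    y = np.require(x, requirements=['C', 'A', 'W'])
    assert y is x

    # asking for Fortran order forces a copy
    z = np.require(x, requirements=['F'])
    assert z is not x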
+ Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + UPDATEIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + UPDATEIFCOPY : False + """ if requirements is None: requirements = [] @@ -507,7 +536,7 @@ def argwhere(a): [1, 2]]) """ - return asarray(a.nonzero()).T + return transpose(asanyarray(a).nonzero()) def flatnonzero(a): """ @@ -557,7 +586,7 @@ def _mode_from_name(mode): return _mode_from_name_dict[mode.lower()[0]] return mode -def correlate(a,v,mode='valid'): +def correlate(a,v,mode='valid',old_behavior=True): """ Discrete, linear correlation of two 1-dimensional sequences. @@ -574,51 +603,48 @@ def correlate(a,v,mode='valid'): mode : {'valid', 'same', 'full'}, optional Refer to the `convolve` docstring. Note that the default is `valid`, unlike `convolve`, which uses `full`. + old_behavior : bool + If True, uses the old, numeric behavior (correlate(a,v) == correlate(v, + a), and the conjugate is not taken for complex arrays). If False, uses + the conventional signal processing definition (see note). See Also -------- convolve : Discrete, linear convolution of two one-dimensional sequences. - acorrelate: Discrete correlation following the usual signal processing - definition for complex arrays, and without assuming that - correlate(a, b) == correlate(b, a) - """ - mode = _mode_from_name(mode) - return multiarray.correlate(a,v,mode) - -def acorrelate(a, v, mode='valid'): - """ - Discrete, linear correlation of two 1-dimensional sequences. - This function computes the correlation as generally defined in signal - processing texts: + Note + ---- + If old_behavior is False, this function computes the correlation as + generally defined in signal processing texts:: - z[k] = sum_n a[n] * conj(v[n+k]) + z[k] = sum_n a[n] * conj(v[n+k]) with a and v sequences being zero-padded where necessary and conj being the conjugate. - Parameters - ---------- - a, v : array_like - Input sequences. - mode : {'valid', 'same', 'full'}, optional - Refer to the `convolve` docstring. Note that the default - is `valid`, unlike `convolve`, which uses `full`. - - Note - ---- - This is the function which corresponds to matlab xcorr. - - See Also + Examples -------- - convolve : Discrete, linear convolution of two - one-dimensional sequences. - correlate: Deprecated function to compute correlation + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([ 3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([ 2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([ 0.5, 2. , 3.5, 3. , 0. ]) + """ mode = _mode_from_name(mode) - return multiarray.acorrelate(a, v, mode) - + if old_behavior: + warnings.warn(""" +The current behavior of correlate is deprecated for 1.4.0, and will be removed +for NumPy 1.5.0. + +The new behavior fits the conventional definition of correlation: inputs are +never swapped, and the second argument is conjugated for complex arrays.""", + DeprecationWarning) + return multiarray.correlate(a,v,mode) + else: + return multiarray.correlate2(a,v,mode) def convolve(a,v,mode='full'): """ @@ -1170,20 +1196,26 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): Parameters ---------- arr : ndarray - Input array. 
- max_line_width : int - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int - Floating point precision. - suppress_small : bool - Represent very small numbers as zero. + Input array. + max_line_width : int, optional + The maximum number of columns the string should span. Newline + characters split the string appropriately after array elements. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent very small numbers as zero, default is False. Very small + is defined by `precision`, if the precision is 8 then + numbers smaller than 5e-9 are represented as zero. Returns ------- string : str The string representation of an array. + See Also + -------- + array_str, array2string, set_printoptions Examples -------- @@ -1194,6 +1226,10 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): >>> np.array_repr(np.array([], np.int32)) 'array([], dtype=int32)' + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([ 0.000001, 0. , 2. , 3. ])' + """ if arr.size > 0 or arr.shape==(0,): lst = array2string(arr, max_line_width, precision, suppress_small, @@ -1221,7 +1257,11 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): def array_str(a, max_line_width=None, precision=None, suppress_small=None): """ - Return a string representation of an array. + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function + is similar to `array_repr`, the difference is that `array_repr` also + returns information on the type of array and data type. Parameters ---------- @@ -1230,13 +1270,16 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): max_line_width : int, optional Inserts newlines if text is longer than `max_line_width`. precision : int, optional - If `a` is float, `precision` sets floating point precision. - suppress_small : boolean, optional - Represent very small numbers as zero. + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using set_printoptions. + suppress_small : bool, optional + Represent very small numbers as zero, default is False. Very small is + defined by precision, if the precision is 8 then numbers smaller than + 5e-9 are represented as zero. See Also -------- - array2string, array_repr + array2string, array_repr, set_printoptions Examples -------- @@ -1264,8 +1307,8 @@ def indices(dimensions, dtype=int): ---------- dimensions : sequence of ints The shape of the grid. - dtype : optional - Data_type of the result. + dtype : dtype, optional + Data type of the result. Returns ------- @@ -1291,7 +1334,7 @@ def indices(dimensions, dtype=int): Examples -------- - >>> grid = np.indices((2,3)) + >>> grid = np.indices((2, 3)) >>> grid.shape (2,2,3) >>> grid[0] # row indices @@ -1301,6 +1344,17 @@ def indices(dimensions, dtype=int): array([[0, 1, 2], [0, 1, 2]]) + The indices can be used as an index into an array. + + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. 
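More generally, component ``k`` of the grid returned by `indices` is constant
along every axis except axis ``k``, where it counts 0, 1, 2, ...; a compact
check::

    import numpy as np

    row, col = np.indices((2, 3))

    # row broadcasts from a column of row numbers,
    # col from a row of column numbers
    assert (row == np.arange(2).reshape(2, 1)).all()
    assert (col == np.arange(3).reshape(1, 3)).all()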
+ """ dimensions = tuple(dimensions) N = len(dimensions) @@ -1638,15 +1692,9 @@ def identity(n, dtype=None): [ 0., 0., 1.]]) """ - a = array([1]+n*[0],dtype=dtype) - b = empty((n,n),dtype=dtype) - - # Note that this assignment depends on the convention that since the a - # array is shorter than the flattened b array, then the a array will - # be repeated until it is the appropriate size. Given a's construction, - # this nicely sets the diagonal to all ones. - b.flat = a - return b + a = zeros((n,n), dtype=dtype) + a.flat[::n+1] = 1 + return a def allclose(a, b, rtol=1.e-5, atol=1.e-8): """ @@ -1816,22 +1864,24 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): Parameters ---------- - all : {'ignore', 'warn', 'raise', 'call'}, optional + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Set treatment for all types of floating-point errors at once: - - ignore: Take no action when the exception occurs - - warn: Print a `RuntimeWarning` (via the Python `warnings` module) - - raise: Raise a `FloatingPointError` + - ignore: Take no action when the exception occurs. + - warn: Print a `RuntimeWarning` (via the Python `warnings` module). + - raise: Raise a `FloatingPointError`. - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. The default is not to change the current behavior. - divide : {'ignore', 'warn', 'raise', 'call'}, optional + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for division by zero. - over : {'ignore', 'warn', 'raise', 'call'}, optional + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for floating-point overflow. - under : {'ignore', 'warn', 'raise', 'call'}, optional + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for floating-point underflow. - invalid : {'ignore', 'warn', 'raise', 'call'}, optional + invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for invalid floating-point operation. Returns @@ -1859,22 +1909,25 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): Examples -------- - - Set mode: - - >>> seterr(over='raise') # doctest: +SKIP + >>> np.seterr(over='raise') {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} + >>> np.seterr(all='ignore') # reset to default + {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'} - >>> old_settings = seterr(all='warn', over='raise') # doctest: +SKIP - - >>> int16(32000) * int16(3) # doctest: +SKIP + >>> np.int16(32000) * np.int16(3) + 30464 + >>> old_settings = np.seterr(all='warn', over='raise') + >>> np.int16(32000) * np.int16(3) Traceback (most recent call last): - File "<stdin>", line 1, in ? + File "<stdin>", line 1, in <module> FloatingPointError: overflow encountered in short_scalars - >>> seterr(all='ignore') # doctest: +SKIP - {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', - 'under': 'ignore'} + + >>> np.seterr(all='print') + {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'} + >>> np.int16(32000) * np.int16(3) + Warning: overflow encountered in short_scalars + 30464 """ @@ -1897,11 +1950,41 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): def geterr(): - """Get the current way of handling floating-point errors. + """ + Get the current way of handling floating-point errors. 
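Returning briefly to the `identity` rewrite above: the slice assignment works
because in a flattened C-order ``n x n`` array the entry ``(i, i)`` sits at
offset ``i*n + i == i*(n + 1)``, so a stride of ``n + 1`` visits exactly the
main diagonal. A sketch::

    import numpy as np

    n = 4
    a = np.zeros((n, n))
    a.flat[::n + 1] = 1        # touches offsets 0, n+1, 2*(n+1), ...

    assert (a == np.eye(n)).all()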
+ + Returns + ------- + res : dict + A dictionary with keys "divide", "over", "under", and "invalid", + whose values are from the strings "ignore", "print", "log", "warn", + "raise", and "call". The keys represent possible floating-point + exceptions, and the values define how these exceptions are handled. + + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterr() # default is all set to 'ignore' + {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', + 'under': 'ignore'} + >>> np.arange(3.) / np.arange(3.) + array([ NaN, 1., 1.]) + + >>> oldsettings = np.seterr(all='warn', over='raise') + >>> np.geterr() + {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'} + >>> np.arange(3.) / np.arange(3.) + __main__:1: RuntimeWarning: invalid value encountered in divide + array([ NaN, 1., 1.]) - Returns a dictionary with entries "divide", "over", "under", and - "invalid", whose values are from the strings - "ignore", "print", "log", "warn", "raise", and "call". """ maskvalue = umath.geterrobj()[1] mask = 7 @@ -1952,13 +2035,13 @@ def seterrcall(func): is to set the error-handler to 'call', using `seterr`. Then, set the function to call using this function. - The second is to set the error-handler to `log`, using `seterr`. + The second is to set the error-handler to 'log', using `seterr`. Floating-point errors then trigger a call to the 'write' method of the provided object. Parameters ---------- - log_func_or_obj : callable f(err, flag) or object with write method + func : callable f(err, flag) or object with write method Function to call upon floating-point errors ('call'-mode) or object whose 'write' method is used to log such message ('log'-mode). @@ -1971,7 +2054,7 @@ def seterrcall(func): In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. - If an object is provided, it's write method should take one argument, + If an object is provided, its write method should take one argument, a string. Returns @@ -1979,6 +2062,10 @@ def seterrcall(func): h : callable or log instance The old error handler. + See Also + -------- + seterr, geterr, geterrcall + Examples -------- Callback upon error: @@ -2025,7 +2112,45 @@ def seterrcall(func): return old def geterrcall(): - """Return the current callback function used on floating-point errors. + """ + Return the current callback function used on floating-point errors. + + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> oldsettings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... 
print "Floating point error (%s), with flag %s" % (type, flag) + >>> oldhandler = np.seterrcall(err_handler) + >>> np.array([1,2,3])/0.0 + Floating point error (divide by zero), with flag 1 + array([ Inf, Inf, Inf]) + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + """ return umath.geterrobj()[2] diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index c72cc122a..70315f1e0 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -521,6 +521,9 @@ def issubdtype(arg1, arg2): arg2 : dtype_like dtype or string representing a typecode. + Returns + ------- + out : bool See Also -------- @@ -660,14 +663,24 @@ def _find_common_coerce(a, b): thisind = __test_types.index(a.char) except ValueError: return None + return _can_coerce_all([a,b], start=thisind) + +# Find a data-type that all data-types in a list can be coerced to +def _can_coerce_all(dtypelist, start=0): + N = len(dtypelist) + if N == 0: + return None + if N == 1: + return dtypelist[0] + thisind = start while thisind < __len_test_types: newdtype = dtype(__test_types[thisind]) - if newdtype >= b and newdtype >= a: + numcoerce = len([x for x in dtypelist if newdtype >= x]) + if numcoerce == N: return newdtype thisind += 1 return None - def find_common_type(array_types, scalar_types): """ Determine common type following standard coercion rules @@ -696,16 +709,14 @@ def find_common_type(array_types, scalar_types): array_types = [dtype(x) for x in array_types] scalar_types = [dtype(x) for x in scalar_types] - if len(scalar_types) == 0: - if len(array_types) == 0: - return None - else: - return max(array_types) - if len(array_types) == 0: - return max(scalar_types) + maxa = _can_coerce_all(array_types) + maxsc = _can_coerce_all(scalar_types) - maxa = max(array_types) - maxsc = max(scalar_types) + if maxa is None: + return maxsc + + if maxsc is None: + return maxa try: index_a = _kind_list.index(maxa.kind) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index dca1787a9..6c66512b0 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -1,11 +1,13 @@ import imp import os import sys +import shutil from os.path import join from numpy.distutils import log from distutils.dep_util import newer from distutils.sysconfig import get_config_var import warnings +import re from setup_common import * @@ -142,7 +144,7 @@ def check_math_capabilities(config, moredefs, mathlibs): # config.h in the public namespace, so we have a clash for the common # functions we test. 
We remove every function tested by python's # autoconf, hoping their own test are correct - if sys.version_info[:2] >= (2, 6): + if sys.version_info[:2] >= (2, 5): for f in OPTIONAL_STDFUNCS_MAYBE: if config.check_decl(fname2def(f), headers=["Python.h", "math.h"]): @@ -321,6 +323,7 @@ def configuration(parent_package='',top_path=None): d = os.path.dirname(target) if not os.path.exists(d): os.makedirs(d) + if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) @@ -434,8 +437,6 @@ def configuration(parent_package='',top_path=None): # Check wether we can use inttypes (C99) formats if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']): moredefs.append(('NPY_USE_C99_FORMATS', 1)) - else: - moredefs.append(('NPY_USE_C99_FORMATS', 0)) # Inline check inline = config_cmd.check_inline() @@ -548,12 +549,13 @@ def configuration(parent_package='',top_path=None): return [] config.add_data_files('include/numpy/*.h') + config.add_include_dirs(join('src', 'npymath')) config.add_include_dirs(join('src', 'multiarray')) config.add_include_dirs(join('src', 'umath')) config.numpy_include_dirs.extend(config.paths('include')) - deps = [join('src','_signbit.c'), + deps = [join('src','npymath','_signbit.c'), join('include','numpy','*object.h'), 'include/numpy/fenv/fenv.c', 'include/numpy/fenv/fenv.h', @@ -578,10 +580,28 @@ def configuration(parent_package='',top_path=None): # (don't ask). Because clib are generated before extensions, we have to # explicitly add an extension which has generate_config_h and # generate_numpyconfig_h as sources *before* adding npymath. - config.add_library('npymath', - sources=[join('src', 'npy_math.c.src')], - depends=[]) - + + subst_dict = dict([("sep", os.path.sep)]) + def get_mathlib_info(*args): + # Another ugly hack: the mathlib info is known once build_src is run, + # but we cannot use add_installed_pkg_config here either, so we only + # updated the substition dictionary during npymath build + config_cmd = config.get_config_cmd() + mlibs = check_mathlib(config_cmd) + + posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) + msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) + subst_dict["posix_mathlib"] = posix_mlib + subst_dict["msvc_mathlib"] = msvc_mlib + + config.add_installed_library('npymath', + sources=[join('src', 'npymath', 'npy_math.c.src'), get_mathlib_info], + install_dir='lib') + config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", + subst_dict) + config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", + subst_dict) + multiarray_deps = [ join('src', 'multiarray', 'arrayobject.h'), join('src', 'multiarray', 'arraytypes.h'), @@ -706,6 +726,9 @@ def configuration(parent_package='',top_path=None): config.add_extension('umath_tests', sources = [join('src','umath', 'umath_tests.c.src')]) + config.add_extension('multiarray_tests', + sources = [join('src', 'multiarray', 'multiarray_tests.c.src')]) + config.add_data_dir('tests') config.add_data_dir('tests/data') diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 37023e8db..ab801fc6d 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -76,8 +76,8 @@ def check_api_version(apiversion, codegen_dir): "with checksum %s, but recorded checksum for C API version %d in " \ "codegen_dir/cversions.txt is %s. If functions were added in the " \ "C API, you have to update C_API_VERSION in %s." 
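Whichever branch the configuration takes here (a C99 ``copysign`` found in the
system library, or the npymath replacement compiled in), the Python-level
semantics are the same; a quick sketch (assuming a build with this patch
applied)::

    import numpy as np

    # magnitude of the first argument, sign of the second
    np.copysign(np.arange(3.0), -1.0)      # array([-0., -1., -2.])
    np.copysign(-2.5, np.ones(3))          # array([ 2.5,  2.5,  2.5])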
- warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, - __file__), + warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, + __file__), MismatchCAPIWarning) # Mandatory functions: if not found, fail the build MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", @@ -87,17 +87,19 @@ MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", # Standard functions which may not be available and for which we have a # replacement implementation. Note that some of these are C99 functions. OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc", "exp2", "log2"] + "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", + "copysign"] # Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h -OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh"] +OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot", + "copysign"] # C99 functions: float and long double versions C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', - "exp2", "log2"] + "exp2", "log2", "copysign"] C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] diff --git a/numpy/core/setupscons.py b/numpy/core/setupscons.py index be42246ad..3dfaa48d9 100644 --- a/numpy/core/setupscons.py +++ b/numpy/core/setupscons.py @@ -63,8 +63,10 @@ def configuration(parent_package='',top_path=None): # XXX: I really have to think about how to communicate path info # between scons and distutils, and set the options at one single # location. - h_file = join(get_scons_pkg_build_dir(config.name), '__multiarray_api.h') - t_file = join(get_scons_pkg_build_dir(config.name), 'multiarray_api.txt') + h_file = join(get_scons_pkg_build_dir(config.name), + 'include/numpy/__multiarray_api.h') + t_file = join(get_scons_pkg_build_dir(config.name), + 'include/numpy/multiarray_api.txt') config.add_data_files((header_dir, h_file), (header_dir, t_file)) @@ -73,8 +75,10 @@ def configuration(parent_package='',top_path=None): # XXX: I really have to think about how to communicate path info # between scons and distutils, and set the options at one single # location. - h_file = join(get_scons_pkg_build_dir(config.name), '__ufunc_api.h') - t_file = join(get_scons_pkg_build_dir(config.name), 'ufunc_api.txt') + h_file = join(get_scons_pkg_build_dir(config.name), + 'include/numpy/__ufunc_api.h') + t_file = join(get_scons_pkg_build_dir(config.name), + 'include/numpy/ufunc_api.txt') config.add_data_files((header_dir, h_file), (header_dir, t_file)) @@ -87,6 +91,7 @@ def configuration(parent_package='',top_path=None): config.add_sconscript('SConstruct', post_hook = add_generated_files, source_files = source_files) + config.add_scons_installed_library('npymath', 'lib') config.add_data_files('include/numpy/*.h') config.add_include_dirs('src') diff --git a/numpy/core/src/_sortmodule.c.src b/numpy/core/src/_sortmodule.c.src index 51c5feb41..28299c1a7 100644 --- a/numpy/core/src/_sortmodule.c.src +++ b/numpy/core/src/_sortmodule.c.src @@ -1,85 +1,257 @@ /* -*- c -*- */ -/* The purpose of this module is to add faster sort functions - that are type-specific. This is done by altering the - function table for the builtin descriptors. 
- - These sorting functions are copied almost directly from numarray - with a few modifications (complex comparisons compare the imaginary - part if the real parts are equal, for example), and the names - are changed. - - The original sorting code is due to Charles R. Harris who wrote - it for numarray. -*/ - -/* Quick sort is usually the fastest, but the worst case scenario can - be slower than the merge and heap sorts. The merge sort requires - extra memory and so for large arrays may not be useful. - - The merge sort is *stable*, meaning that equal components - are unmoved from their entry versions, so it can be used to - implement lexigraphic sorting on multiple keys. +/* + * The purpose of this module is to add faster sort functions + * that are type-specific. This is done by altering the + * function table for the builtin descriptors. + * + * These sorting functions are copied almost directly from numarray + * with a few modifications (complex comparisons compare the imaginary + * part if the real parts are equal, for example), and the names + * are changed. + * + * The original sorting code is due to Charles R. Harris who wrote + * it for numarray. + */ - The heap sort is included for completeness. -*/ +/* + * Quick sort is usually the fastest, but the worst case scenario can + * be slower than the merge and heap sorts. The merge sort requires + * extra memory and so for large arrays may not be useful. + * + * The merge sort is *stable*, meaning that equal components + * are unmoved from their entry versions, so it can be used to + * implement lexigraphic sorting on multiple keys. + * + * The heap sort is included for completeness. + */ #include "Python.h" #include "numpy/noprefix.h" +#include "numpy/npy_math.h" +#define NOT_USED NPY_UNUSED(unused) #define PYA_QS_STACK 100 #define SMALL_QUICKSORT 15 #define SMALL_MERGESORT 20 #define SMALL_STRING 16 -#define SWAP(a,b) {SWAP_temp = (b); (b)=(a); (a) = SWAP_temp;} -#define STDC_LT(a,b) ((a) < (b)) -#define STDC_LE(a,b) ((a) <= (b)) -#define STDC_EQ(a,b) ((a) == (b)) -#define NUMC_LT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag): ((p).real < (q).real))) -#define NUMC_LE(p,q) ((((p).real==(q).real) ? 
((p).imag <= (q).imag): ((p).real <= (q).real))) -#define NUMC_EQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define STRING_LT(pa, pb, len) (compare_string(pa, pb, len) < 0) -#define STRING_LE(pa, pb, len) (compare_string(pa, pb, len) <= 0) -#define STRING_EQ(pa, pb, len) (compare_string(pa, pb, len) == 0) -#define UNICODE_LT(pa, pb, len) (compare_ucs4(pa, pb, len) < 0) -#define UNICODE_LE(pa, pb, len) (compare_ucs4(pa, pb, len) <= 0) -#define UNICODE_EQ(pa, pb, len) (compare_ucs4(pa, pb, len) == 0) + +/* + ***************************************************************************** + ** SWAP MACROS ** + ***************************************************************************** + */ + +/**begin repeat + * + * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, + * CDOUBLE,CLONGDOUBLE, INTP# + * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, + * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong, + * npy_float, npy_double, npy_longdouble, npy_cfloat, npy_cdouble, + * npy_clongdouble, npy_intp# + */ +#define @TYPE@_SWAP(a,b) {@type@ tmp = (b); (b)=(a); (a) = tmp;} + +/**end repeat**/ + +/* + ***************************************************************************** + ** COMPARISON FUNCTIONS ** + ***************************************************************************** + */ + +/**begin repeat + * + * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG# + * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong# + */ +NPY_INLINE static int +@TYPE@_LT(@type@ a, @type@ b) +{ + return a < b; +} +/**end repeat**/ + + +/**begin repeat + * + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# + * #type = float, double, longdouble# + */ +NPY_INLINE static int +@TYPE@_LT(@type@ a, @type@ b) +{ + return a < b || (b != b && a == a); +} +/**end repeat**/ + + +/* + * For inline functions SUN recommends not using a return in the then part + * of an if statement. It's a SUN compiler thing, so assign the return value + * to a variable instead. 
+ */ + +/**begin repeat + * + * #TYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #type = cfloat, cdouble, clongdouble# + */ +NPY_INLINE static int +@TYPE@_LT(@type@ a, @type@ b) +{ + int ret; + + if (a.real < b.real) { + ret = a.imag == a.imag || b.imag != b.imag; + } + else if (a.real > b.real) { + ret = b.imag != b.imag && a.imag == a.imag; + } + else if (a.real == b.real || (a.real != a.real && b.real != b.real)) { + ret = a.imag < b.imag || (b.imag != b.imag && a.imag == a.imag); + } + else { + ret = b.real != b.real; + } + + return ret; +} +/**end repeat**/ + + +/* The PyObject functions are stubs for later use */ +NPY_INLINE static int +PyObject_LT(PyObject *pa, PyObject *pb) +{ + return 0; +} + + +NPY_INLINE static void +STRING_COPY(char *s1, char *s2, size_t len) +{ + memcpy(s1, s2, len); +} + + +NPY_INLINE static void +STRING_SWAP(char *s1, char *s2, size_t len) +{ + while(len--) { + const char t = *s1; + *s1++ = *s2; + *s2++ = t; + } +} + + +NPY_INLINE static int +STRING_LT(char *s1, char *s2, size_t len) +{ + const unsigned char *c1 = (unsigned char *)s1; + const unsigned char *c2 = (unsigned char *)s2; + size_t i; + int ret = 0; + + for (i = 0; i < len; ++i) { + if (c1[i] != c2[i]) { + ret = c1[i] < c2[i]; + break; + } + } + return ret; +} + + +NPY_INLINE static void +UNICODE_COPY(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) +{ + while(len--) { + *s1++ = *s2++; + } +} + + +NPY_INLINE static void +UNICODE_SWAP(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) +{ + while(len--) { + const npy_ucs4 t = *s1; + *s1++ = *s2; + *s2++ = t; + } +} + + +NPY_INLINE static int +UNICODE_LT(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) +{ + size_t i; + int ret = 0; + + for (i = 0; i < len; ++i) { + if (s1[i] != s2[i]) { + ret = s1[i] < s2[i]; + break; + } + } + return ret; +} + + +/* + ***************************************************************************** + ** NUMERIC SORTS ** + ***************************************************************************** + */ /**begin repeat - #TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,DATETIME,TIMEDELTA# - #type=Bool,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble,datetime,timedelta# - #lessthan=STDC_LT*14,NUMC_LT*3,STDC_LT*2# - #lessequal=STDC_LE*14,NUMC_LE*3,STDC_LE*2# -**/ + * + * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE# + * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble, + * cfloat, cdouble, clongdouble# + */ + + static int -@TYPE@_quicksort(@type@ *start, intp num, void * NPY_UNUSED(unused)) +@TYPE@_quicksort(@type@ *start, npy_intp num, void *NOT_USED) { @type@ *pl = start; @type@ *pr = start + num - 1; - @type@ vp, SWAP_temp; + @type@ vp; @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk; - for(;;) { + for (;;) { while ((pr - pl) > SMALL_QUICKSORT) { /* quicksort partition */ pm = pl + ((pr - pl) >> 1); - if (@lessthan@(*pm, *pl)) SWAP(*pm, *pl); - if (@lessthan@(*pr, *pm)) SWAP(*pr, *pm); - if (@lessthan@(*pm, *pl)) SWAP(*pm, *pl); + if (@TYPE@_LT(*pm, *pl)) @TYPE@_SWAP(*pm, *pl); + if (@TYPE@_LT(*pr, *pm)) @TYPE@_SWAP(*pr, *pm); + if (@TYPE@_LT(*pm, *pl)) @TYPE@_SWAP(*pm, *pl); vp = *pm; pi = pl; pj = pr - 1; - SWAP(*pm, *pj); - for(;;) { - do ++pi; while (@lessthan@(*pi, vp)); - do --pj; while (@lessthan@(vp, *pj)); - if (pi >= pj) 
break; - SWAP(*pi,*pj); + @TYPE@_SWAP(*pm, *pj); + for (;;) { + do ++pi; while (@TYPE@_LT(*pi, vp)); + do --pj; while (@TYPE@_LT(vp, *pj)); + if (pi >= pj) { + break; + } + @TYPE@_SWAP(*pi,*pj); } pk = pr - 1; - SWAP(*pi, *pk); + @TYPE@_SWAP(*pi, *pk); /* push largest partition on stack */ if (pi - pl < pr - pi) { *sptr++ = pi + 1; @@ -94,16 +266,18 @@ static int } /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { + for (pi = pl + 1; pi <= pr; ++pi) { vp = *pi; pj = pi; pk = pi - 1; - while (pj > pl && @lessthan@(vp, *pk)) { + while (pj > pl && @TYPE@_LT(vp, *pk)) { *pj-- = *pk--; } *pj = vp; } - if (sptr == stack) break; + if (sptr == stack) { + break; + } pr = *(--sptr); pl = *(--sptr); } @@ -112,34 +286,36 @@ static int } static int -@TYPE@_aquicksort(@type@ *v, intp* tosort, intp num, void *NPY_UNUSED(unused)) +@TYPE@_aquicksort(@type@ *v, npy_intp* tosort, npy_intp num, void *NOT_USED) { @type@ vp; - intp *pl, *pr, SWAP_temp; - intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pk, vi; + npy_intp *pl, *pr; + npy_intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pk, vi; pl = tosort; pr = tosort + num - 1; - for(;;) { + for (;;) { while ((pr - pl) > SMALL_QUICKSORT) { /* quicksort partition */ pm = pl + ((pr - pl) >> 1); - if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl); - if (@lessthan@(v[*pr],v[*pm])) SWAP(*pr,*pm); - if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl); + if (@TYPE@_LT(v[*pm],v[*pl])) INTP_SWAP(*pm,*pl); + if (@TYPE@_LT(v[*pr],v[*pm])) INTP_SWAP(*pr,*pm); + if (@TYPE@_LT(v[*pm],v[*pl])) INTP_SWAP(*pm,*pl); vp = v[*pm]; pi = pl; pj = pr - 1; - SWAP(*pm,*pj); - for(;;) { - do ++pi; while (@lessthan@(v[*pi],vp)); - do --pj; while (@lessthan@(vp,v[*pj])); - if (pi >= pj) break; - SWAP(*pi,*pj); - } - pk = pr - 1; - SWAP(*pi,*pk); + INTP_SWAP(*pm,*pj); + for (;;) { + do ++pi; while (@TYPE@_LT(v[*pi],vp)); + do --pj; while (@TYPE@_LT(vp,v[*pj])); + if (pi >= pj) { + break; + } + INTP_SWAP(*pi,*pj); + } + pk = pr - 1; + INTP_SWAP(*pi,*pk); /* push largest partition on stack */ if (pi - pl < pr - pi) { *sptr++ = pi + 1; @@ -154,17 +330,19 @@ static int } /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { + for (pi = pl + 1; pi <= pr; ++pi) { vi = *pi; vp = v[vi]; pj = pi; pk = pi - 1; - while (pj > pl && @lessthan@(vp, v[*pk])) { + while (pj > pl && @TYPE@_LT(vp, v[*pk])) { *pj-- = *pk--; } *pj = vi; } - if (sptr == stack) break; + if (sptr == stack) { + break; + } pr = *(--sptr); pl = *(--sptr); } @@ -174,10 +352,10 @@ static int static int -@TYPE@_heapsort(@type@ *start, intp n, void *NPY_UNUSED(unused)) +@TYPE@_heapsort(@type@ *start, npy_intp n, void *NOT_USED) { @type@ tmp, *a; - intp i,j,l; + npy_intp i,j,l; /* The array needs to be offset by one for heapsort indexing */ a = start - 1; @@ -185,15 +363,17 @@ static int for (l = n>>1; l > 0; --l) { tmp = a[l]; for (i = l, j = l<<1; j <= n;) { - if (j < n && @lessthan@(a[j], a[j+1])) + if (j < n && @TYPE@_LT(a[j], a[j+1])) { j += 1; - if (@lessthan@(tmp, a[j])) { + } + if (@TYPE@_LT(tmp, a[j])) { a[i] = a[j]; i = j; j += j; } - else + else { break; + } } a[i] = tmp; } @@ -203,15 +383,17 @@ static int a[n] = a[1]; n -= 1; for (i = 1, j = 2; j <= n;) { - if (j < n && @lessthan@(a[j], a[j+1])) + if (j < n && @TYPE@_LT(a[j], a[j+1])) { j++; - if (@lessthan@(tmp, a[j])) { + } + if (@TYPE@_LT(tmp, a[j])) { a[i] = a[j]; i = j; j += j; } - else + else { break; + } } a[i] = tmp; } @@ -220,24 +402,26 @@ static int } static int -@TYPE@_aheapsort(@type@ *v, intp *tosort, intp n, void *NPY_UNUSED(unused)) 
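/*
 * [Illustrative aside: a minimal sketch, not part of this change.]
 * Every @TYPE@_LT used by the sorts in this file defines a total
 * order in which a nan compares larger than any non-nan value, so
 * sorting sweeps nans to the end of the array. Reduced to plain
 * doubles, with hypothetical names, the predicate is simply:
 */
#include <math.h>

static int
sketch_double_lt(double a, double b)
{
    /* true when a < b, or when b is nan and a is not */
    return a < b || (b != b && a == a);
}

/*
 * Under this predicate sketch_double_lt(1.0, NAN) is 1 (via the
 * second clause, since 1.0 < NAN is false), while both
 * sketch_double_lt(NAN, 1.0) and sketch_double_lt(NAN, NAN) are 0,
 * which is what pushes nans past all other values. The complex
 * @TYPE@_LT defined earlier extends the idea by grouping values by
 * where their nans occur, [R + Rj, R + nanj, nan + Rj, nan + nanj],
 * with the non-nan parts breaking ties within each group.
 */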
+@TYPE@_aheapsort(@type@ *v, npy_intp *tosort, npy_intp n, void *NOT_USED) { - intp *a, i,j,l, tmp; + npy_intp *a, i,j,l, tmp; /* The arrays need to be offset by one for heapsort indexing */ a = tosort - 1; for (l = n>>1; l > 0; --l) { tmp = a[l]; for (i = l, j = l<<1; j <= n;) { - if (j < n && @lessthan@(v[a[j]], v[a[j+1]])) + if (j < n && @TYPE@_LT(v[a[j]], v[a[j+1]])) { j += 1; - if (@lessthan@(v[tmp], v[a[j]])) { + } + if (@TYPE@_LT(v[tmp], v[a[j]])) { a[i] = a[j]; i = j; j += j; } - else + else { break; + } } a[i] = tmp; } @@ -247,15 +431,17 @@ static int a[n] = a[1]; n -= 1; for (i = 1, j = 2; j <= n;) { - if (j < n && @lessthan@(v[a[j]], v[a[j+1]])) + if (j < n && @TYPE@_LT(v[a[j]], v[a[j+1]])) { j++; - if (@lessthan@(v[tmp], v[a[j]])) { + } + if (@TYPE@_LT(v[tmp], v[a[j]])) { a[i] = a[j]; i = j; j += j; } - else + else { break; + } } a[i] = tmp; } @@ -273,17 +459,17 @@ static void pm = pl + ((pr - pl) >> 1); @TYPE@_mergesort0(pl, pm, pw); @TYPE@_mergesort0(pm, pr, pw); - for(pi = pw, pj = pl; pj < pm;) { + for (pi = pw, pj = pl; pj < pm;) { *pi++ = *pj++; } pj = pw; pk = pl; while (pj < pi && pm < pr) { - if (@lessequal@(*pj,*pm)) { - *pk = *pj++; + if (@TYPE@_LT(*pm,*pj)) { + *pk = *pm++; } else { - *pk = *pm++; + *pk = *pj++; } pk++; } @@ -293,11 +479,11 @@ static void } else { /* insertion sort */ - for(pi = pl + 1; pi < pr; ++pi) { + for (pi = pl + 1; pi < pr; ++pi) { vp = *pi; pj = pi; pk = pi -1; - while (pj > pl && @lessthan@(vp, *pk)) { + while (pj > pl && @TYPE@_LT(vp, *pk)) { *pj-- = *pk--; } *pj = vp; @@ -306,7 +492,7 @@ static void } static int -@TYPE@_mergesort(@type@ *start, intp num, void *NPY_UNUSED(unused)) +@TYPE@_mergesort(@type@ *start, npy_intp num, void *NOT_USED) { @type@ *pl, *pr, *pw; @@ -324,39 +510,39 @@ static int } static void -@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw) +@TYPE@_amergesort0(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw) { @type@ vp; - intp vi, *pi, *pj, *pk, *pm; + npy_intp vi, *pi, *pj, *pk, *pm; if (pr - pl > SMALL_MERGESORT) { /* merge sort */ pm = pl + ((pr - pl + 1)>>1); @TYPE@_amergesort0(pl,pm-1,v,pw); @TYPE@_amergesort0(pm,pr,v,pw); - for(pi = pw, pj = pl; pj < pm; ++pi, ++pj) { + for (pi = pw, pj = pl; pj < pm; ++pi, ++pj) { *pi = *pj; } - for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) { - if (@lessequal@(v[*pk],v[*pj])) { - *pm = *pk; - ++pk; - } - else { + for (pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) { + if (@TYPE@_LT(v[*pj],v[*pk])) { *pm = *pj; ++pj; } + else { + *pm = *pk; + ++pk; + } } - for(; pk < pi; ++pm, ++pk) { + for (; pk < pi; ++pm, ++pk) { *pm = *pk; } } else { /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { + for (pi = pl + 1; pi <= pr; ++pi) { vi = *pi; vp = v[vi]; - for(pj = pi, pk = pi - 1; pj > pl && @lessthan@(vp, v[*pk]); --pj, --pk) { + for (pj = pi, pk = pi - 1; pj > pl && @TYPE@_LT(vp, v[*pk]); --pj, --pk) { *pj = *pk; } *pj = vi; @@ -365,9 +551,9 @@ static void } static int -@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, void *NPY_UNUSED(unused)) +@TYPE@_amergesort(@type@ *v, npy_intp *tosort, npy_intp num, void *NOT_USED) { - intp *pl, *pr, *pw; + npy_intp *pl, *pr, *pw; pl = tosort; pr = pl + num - 1; pw = PyDimMem_NEW((1+num/2)); @@ -382,85 +568,22 @@ static int return 0; } + + /**end repeat**/ /* - * Subroutines that will hopefully be inlined when the code - * for strings and unicode is compiled with proper flags. 
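/*
 * [Illustrative aside: a minimal sketch, not part of this change.]
 * The string and unicode sorts that follow treat the array as
 * fixed-width records: each element is `len` code units wide,
 * pointers step in multiples of `len`, and the compare/copy/swap
 * helpers all take the width explicitly. The same pattern, reduced
 * to a standalone insertion sort over char records (all names here
 * are hypothetical):
 */
#include <stdlib.h>
#include <string.h>

static void
sketch_fixed_width_isort(char *start, size_t num, size_t len)
{
    char *vp = malloc(len);    /* scratch space for one record */
    char *pl = start;
    char *pr = start + num*len;
    char *pi, *pj, *pk;

    if (vp == NULL) {
        return;    /* a real version would report the failure */
    }
    for (pi = pl + len; pi < pr; pi += len) {
        memcpy(vp, pi, len);
        pj = pi;
        pk = pi - len;
        /* shift larger records right, then drop the saved one in */
        while (pj > pl && memcmp(vp, pk, len) < 0) {
            memcpy(pj, pk, len);
            pj -= len;
            pk -= len;
        }
        memcpy(pj, vp, len);
    }
    free(vp);
}

/*
 * memcmp() compares unsigned bytes, which is the same order the
 * STRING_LT helper above implements; the UNICODE variants apply the
 * same loop over npy_ucs4 units instead of bytes.
 */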
+ ***************************************************************************** + ** STRING SORTS ** + ***************************************************************************** */ -#define copy_string memcpy - - -static void -swap_string(char *s1, char *s2, size_t len) -{ - while(len--) { - const char t = *s1; - *s1++ = *s2; - *s2++ = t; - } -} - - -static int -compare_string(char *s1, char *s2, size_t len) -{ - const unsigned char *c1 = (unsigned char *)s1; - const unsigned char *c2 = (unsigned char *)s2; - size_t i; - - for(i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - return (c1[i] > c2[i]) ? 1 : -1; - } - } - return 0; -} - - -static void -copy_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - while(len--) { - *s1++ = *s2++; - } -} - - -static void -swap_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - while(len--) { - const npy_ucs4 t = *s1; - *s1++ = *s2; - *s2++ = t; - } -} - - -static int -compare_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - size_t i; - - for(i = 0; i < len; ++i) { - if (s1[i] != s2[i]) { - return (s1[i] > s2[i]) ? 1 : -1; - } - } - return 0; -} - /**begin repeat - #TYPE=STRING, UNICODE# - #type=char, PyArray_UCS4# - #lessthan=STRING_LT, UNICODE_LT# - #lessequal=STRING_LE, UNICODE_LE# - #swap=swap_string, swap_ucs4# - #copy=copy_string, copy_ucs4# -**/ + * + * #TYPE = STRING, UNICODE# + * #type = char, PyArray_UCS4# + */ static void @TYPE@_mergesort0(@type@ *pl, @type@ *pr, @type@ *pw, @type@ *vp, size_t len) @@ -472,41 +595,41 @@ static void pm = pl + (((pr - pl)/len) >> 1)*len; @TYPE@_mergesort0(pl, pm, pw, vp, len); @TYPE@_mergesort0(pm, pr, pw, vp, len); - @copy@(pw, pl, pm - pl); + @TYPE@_COPY(pw, pl, pm - pl); pi = pw + (pm - pl); pj = pw; pk = pl; while (pj < pi && pm < pr) { - if (@lessequal@(pj, pm, len)) { - @copy@(pk, pj, len); - pj += len; + if (@TYPE@_LT(pm, pj, len)) { + @TYPE@_COPY(pk, pm, len); + pm += len; } else { - @copy@(pk, pm, len); - pm += len; + @TYPE@_COPY(pk, pj, len); + pj += len; } pk += len; } - @copy@(pk, pj, pi - pj); + @TYPE@_COPY(pk, pj, pi - pj); } else { /* insertion sort */ - for(pi = pl + len; pi < pr; pi += len) { - @copy@(vp, pi, len); + for (pi = pl + len; pi < pr; pi += len) { + @TYPE@_COPY(vp, pi, len); pj = pi; pk = pi - len; - while (pj > pl && @lessthan@(vp, pk, len)) { - @copy@(pj, pk, len); + while (pj > pl && @TYPE@_LT(vp, pk, len)) { + @TYPE@_COPY(pj, pk, len); pj -= len; pk -= len; } - @copy@(pj, vp, len); + @TYPE@_COPY(pj, vp, len); } } } static int -@TYPE@_mergesort(@type@ *start, intp num, PyArrayObject *arr) +@TYPE@_mergesort(@type@ *start, npy_intp num, PyArrayObject *arr) { const size_t elsize = arr->descr->elsize; const size_t len = elsize / sizeof(@type@); @@ -537,7 +660,7 @@ fail_0: } static int -@TYPE@_quicksort(@type@ *start, intp num, PyArrayObject *arr) +@TYPE@_quicksort(@type@ *start, npy_intp num, PyArrayObject *arr) { const size_t len = arr->descr->elsize/sizeof(@type@); @type@ *vp = malloc(arr->descr->elsize); @@ -545,25 +668,27 @@ static int @type@ *pr = start + (num - 1)*len; @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk; - for(;;) { + for (;;) { while ((size_t)(pr - pl) > SMALL_QUICKSORT*len) { /* quicksort partition */ pm = pl + (((pr - pl)/len) >> 1)*len; - if (@lessthan@(pm, pl, len)) @swap@(pm, pl, len); - if (@lessthan@(pr, pm, len)) @swap@(pr, pm, len); - if (@lessthan@(pm, pl, len)) @swap@(pm, pl, len); - @copy@(vp, pm, len); + if (@TYPE@_LT(pm, pl, len)) @TYPE@_SWAP(pm, pl, len); + if (@TYPE@_LT(pr, pm, len)) @TYPE@_SWAP(pr, pm, len); + if (@TYPE@_LT(pm, pl, 
len)) @TYPE@_SWAP(pm, pl, len); + @TYPE@_COPY(vp, pm, len); pi = pl; pj = pr - len; - @swap@(pm, pj, len); - for(;;) { - do pi += len; while (@lessthan@(pi, vp, len)); - do pj -= len; while (@lessthan@(vp, pj, len)); - if (pi >= pj) break; - @swap@(pi, pj, len); + @TYPE@_SWAP(pm, pj, len); + for (;;) { + do pi += len; while (@TYPE@_LT(pi, vp, len)); + do pj -= len; while (@TYPE@_LT(vp, pj, len)); + if (pi >= pj) { + break; + } + @TYPE@_SWAP(pi, pj, len); } pk = pr - len; - @swap@(pi, pk, len); + @TYPE@_SWAP(pi, pk, len); /* push largest partition on stack */ if (pi - pl < pr - pi) { *sptr++ = pi + len; @@ -578,18 +703,20 @@ static int } /* insertion sort */ - for(pi = pl + len; pi <= pr; pi += len) { - @copy@(vp, pi, len); + for (pi = pl + len; pi <= pr; pi += len) { + @TYPE@_COPY(vp, pi, len); pj = pi; pk = pi - len; - while (pj > pl && @lessthan@(vp, pk, len)) { - @copy@(pj, pk, len); + while (pj > pl && @TYPE@_LT(vp, pk, len)) { + @TYPE@_COPY(pj, pk, len); pj -= len; pk -= len; } - @copy@(pj, vp, len); + @TYPE@_COPY(pj, vp, len); + } + if (sptr == stack) { + break; } - if (sptr == stack) break; pr = *(--sptr); pl = *(--sptr); } @@ -600,45 +727,47 @@ static int static int -@TYPE@_heapsort(@type@ *start, intp n, PyArrayObject *arr) +@TYPE@_heapsort(@type@ *start, npy_intp n, PyArrayObject *arr) { size_t len = arr->descr->elsize/sizeof(@type@); @type@ *tmp = malloc(arr->descr->elsize); @type@ *a = start - len; - intp i,j,l; + npy_intp i,j,l; for (l = n>>1; l > 0; --l) { - @copy@(tmp, a + l*len, len); + @TYPE@_COPY(tmp, a + l*len, len); for (i = l, j = l<<1; j <= n;) { - if (j < n && @lessthan@(a + j*len, a + (j+1)*len, len)) + if (j < n && @TYPE@_LT(a + j*len, a + (j+1)*len, len)) j += 1; - if (@lessthan@(tmp, a + j*len, len)) { - @copy@(a + i*len, a + j*len, len); + if (@TYPE@_LT(tmp, a + j*len, len)) { + @TYPE@_COPY(a + i*len, a + j*len, len); i = j; j += j; } - else + else { break; + } } - @copy@(a + i*len, tmp, len); + @TYPE@_COPY(a + i*len, tmp, len); } for (; n > 1;) { - @copy@(tmp, a + n*len, len); - @copy@(a + n*len, a + len, len); + @TYPE@_COPY(tmp, a + n*len, len); + @TYPE@_COPY(a + n*len, a + len, len); n -= 1; for (i = 1, j = 2; j <= n;) { - if (j < n && @lessthan@(a + j*len, a + (j+1)*len, len)) + if (j < n && @TYPE@_LT(a + j*len, a + (j+1)*len, len)) j++; - if (@lessthan@(tmp, a + j*len, len)) { - @copy@(a + i*len, a + j*len, len); + if (@TYPE@_LT(tmp, a + j*len, len)) { + @TYPE@_COPY(a + i*len, a + j*len, len); i = j; j += j; } - else + else { break; + } } - @copy@(a + i*len, tmp, len); + @TYPE@_COPY(a + i*len, tmp, len); } free(tmp); @@ -647,10 +776,10 @@ static int static int -@TYPE@_aheapsort(@type@ *v, intp *tosort, intp n, PyArrayObject *arr) +@TYPE@_aheapsort(@type@ *v, npy_intp *tosort, npy_intp n, PyArrayObject *arr) { size_t len = arr->descr->elsize/sizeof(@type@); - intp *a, i,j,l, tmp; + npy_intp *a, i,j,l, tmp; /* The array needs to be offset by one for heapsort indexing */ a = tosort - 1; @@ -658,15 +787,16 @@ static int for (l = n>>1; l > 0; --l) { tmp = a[l]; for (i = l, j = l<<1; j <= n;) { - if (j < n && @lessthan@(v + a[j]*len, v + a[j+1]*len, len)) + if (j < n && @TYPE@_LT(v + a[j]*len, v + a[j+1]*len, len)) j += 1; - if (@lessthan@(v + tmp*len, v + a[j]*len, len)) { + if (@TYPE@_LT(v + tmp*len, v + a[j]*len, len)) { a[i] = a[j]; i = j; j += j; } - else + else { break; + } } a[i] = tmp; } @@ -676,15 +806,16 @@ static int a[n] = a[1]; n -= 1; for (i = 1, j = 2; j <= n;) { - if (j < n && @lessthan@(v + a[j]*len, v + a[j+1]*len, len)) + if (j < n && 
@TYPE@_LT(v + a[j]*len, v + a[j+1]*len, len)) j++; - if (@lessthan@(v + tmp*len, v + a[j]*len, len)) { + if (@TYPE@_LT(v + tmp*len, v + a[j]*len, len)) { a[i] = a[j]; i = j; j += j; } - else + else { break; + } } a[i] = tmp; } @@ -694,35 +825,37 @@ static int static int -@TYPE@_aquicksort(@type@ *v, intp* tosort, intp num, PyArrayObject *arr) +@TYPE@_aquicksort(@type@ *v, npy_intp* tosort, npy_intp num, PyArrayObject *arr) { size_t len = arr->descr->elsize/sizeof(@type@); @type@ *vp; - intp *pl = tosort; - intp *pr = tosort + num - 1; - intp *stack[PYA_QS_STACK]; - intp **sptr=stack; - intp *pm, *pi, *pj, *pk, vi, SWAP_temp; + npy_intp *pl = tosort; + npy_intp *pr = tosort + num - 1; + npy_intp *stack[PYA_QS_STACK]; + npy_intp **sptr=stack; + npy_intp *pm, *pi, *pj, *pk, vi; - for(;;) { + for (;;) { while ((pr - pl) > SMALL_QUICKSORT) { /* quicksort partition */ pm = pl + ((pr - pl) >> 1); - if (@lessthan@(v + (*pm)*len, v + (*pl)*len, len)) SWAP(*pm, *pl); - if (@lessthan@(v + (*pr)*len, v + (*pm)*len, len)) SWAP(*pr, *pm); - if (@lessthan@(v + (*pm)*len, v + (*pl)*len, len)) SWAP(*pm, *pl); + if (@TYPE@_LT(v + (*pm)*len, v + (*pl)*len, len)) INTP_SWAP(*pm, *pl); + if (@TYPE@_LT(v + (*pr)*len, v + (*pm)*len, len)) INTP_SWAP(*pr, *pm); + if (@TYPE@_LT(v + (*pm)*len, v + (*pl)*len, len)) INTP_SWAP(*pm, *pl); vp = v + (*pm)*len; pi = pl; pj = pr - 1; - SWAP(*pm,*pj); - for(;;) { - do ++pi; while (@lessthan@(v + (*pi)*len, vp, len)); - do --pj; while (@lessthan@(vp, v + (*pj)*len, len)); - if (pi >= pj) break; - SWAP(*pi,*pj); + INTP_SWAP(*pm,*pj); + for (;;) { + do ++pi; while (@TYPE@_LT(v + (*pi)*len, vp, len)); + do --pj; while (@TYPE@_LT(vp, v + (*pj)*len, len)); + if (pi >= pj) { + break; + } + INTP_SWAP(*pi,*pj); } pk = pr - 1; - SWAP(*pi,*pk); + INTP_SWAP(*pi,*pk); /* push largest partition on stack */ if (pi - pl < pr - pi) { *sptr++ = pi + 1; @@ -737,17 +870,19 @@ static int } /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { + for (pi = pl + 1; pi <= pr; ++pi) { vi = *pi; vp = v + vi*len; pj = pi; pk = pi - 1; - while (pj > pl && @lessthan@(vp, v + (*pk)*len, len)) { + while (pj > pl && @TYPE@_LT(vp, v + (*pk)*len, len)) { *pj-- = *pk--; } *pj = vi; } - if (sptr == stack) break; + if (sptr == stack) { + break; + } pr = *(--sptr); pl = *(--sptr); } @@ -757,26 +892,26 @@ static int static void -@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw, int len) +@TYPE@_amergesort0(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw, int len) { @type@ *vp; - intp vi, *pi, *pj, *pk, *pm; + npy_intp vi, *pi, *pj, *pk, *pm; if (pr - pl > SMALL_MERGESORT) { /* merge sort */ pm = pl + ((pr - pl) >> 1); @TYPE@_amergesort0(pl,pm,v,pw,len); @TYPE@_amergesort0(pm,pr,v,pw,len); - for(pi = pw, pj = pl; pj < pm;) { + for (pi = pw, pj = pl; pj < pm;) { *pi++ = *pj++; } pj = pw; pk = pl; while (pj < pi && pm < pr) { - if (@lessequal@(v + (*pj)*len, v + (*pm)*len, len)) { - *pk = *pj++; - } else { + if (@TYPE@_LT(v + (*pm)*len, v + (*pj)*len, len)) { *pk = *pm++; + } else { + *pk = *pj++; } pk++; } @@ -785,12 +920,12 @@ static void } } else { /* insertion sort */ - for(pi = pl + 1; pi < pr; ++pi) { + for (pi = pl + 1; pi < pr; ++pi) { vi = *pi; vp = v + vi*len; pj = pi; pk = pi -1; - while (pj > pl && @lessthan@(vp, v + (*pk)*len, len)) { + while (pj > pl && @TYPE@_LT(vp, v + (*pk)*len, len)) { *pj-- = *pk--; } *pj = vi; @@ -800,11 +935,11 @@ static void static int -@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, PyArrayObject *arr) +@TYPE@_amergesort(@type@ *v, npy_intp *tosort, 
npy_intp num, PyArrayObject *arr) { const size_t elsize = arr->descr->elsize; const size_t len = elsize / sizeof(@type@); - intp *pl, *pr, *pw; + npy_intp *pl, *pr, *pw; pl = tosort; pr = pl + num; @@ -826,20 +961,23 @@ add_sortfuncs(void) PyArray_Descr *descr; /**begin repeat - #TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,STRING,UNICODE,DATETIME,TIMEDELTA# - **/ + * + * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE# + */ descr = PyArray_DescrFromType(PyArray_@TYPE@); - descr->f->sort[PyArray_QUICKSORT] = \ + descr->f->sort[PyArray_QUICKSORT] = (PyArray_SortFunc *)@TYPE@_quicksort; - descr->f->argsort[PyArray_QUICKSORT] = \ + descr->f->argsort[PyArray_QUICKSORT] = (PyArray_ArgSortFunc *)@TYPE@_aquicksort; - descr->f->sort[PyArray_HEAPSORT] = \ + descr->f->sort[PyArray_HEAPSORT] = (PyArray_SortFunc *)@TYPE@_heapsort; - descr->f->argsort[PyArray_HEAPSORT] = \ + descr->f->argsort[PyArray_HEAPSORT] = (PyArray_ArgSortFunc *)@TYPE@_aheapsort; - descr->f->sort[PyArray_MERGESORT] = \ + descr->f->sort[PyArray_MERGESORT] = (PyArray_SortFunc *)@TYPE@_mergesort; - descr->f->argsort[PyArray_MERGESORT] = \ + descr->f->argsort[PyArray_MERGESORT] = (PyArray_ArgSortFunc *)@TYPE@_amergesort; /**end repeat**/ diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index dccd4984f..2f49e03a5 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -1099,16 +1099,15 @@ PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset, static PyObject * array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) { - static char *kwlist[] = {"shape", "dtype", "buffer", - "offset", "strides", + static char *kwlist[] = {"shape", "dtype", "buffer", "offset", "strides", "order", NULL}; - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; int itemsize; PyArray_Dims dims = {NULL, 0}; PyArray_Dims strides = {NULL, 0}; PyArray_Chunk buffer; - longlong offset=0; - NPY_ORDER order=PyArray_CORDER; + longlong offset = 0; + NPY_ORDER order = PyArray_CORDER; int fortran = 0; PyArrayObject *ret; @@ -1268,73 +1267,65 @@ array_alloc(PyTypeObject *type, Py_ssize_t NPY_UNUSED(nitems)) NPY_NO_EXPORT PyTypeObject PyArray_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.ndarray", /* tp_name */ - sizeof(PyArrayObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.ndarray", /* tp_name */ + sizeof(PyArrayObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)array_dealloc, /* tp_dealloc */ - (printfunc)NULL, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - (cmpfunc)0, /* tp_compare */ - (reprfunc)array_repr, /* tp_repr */ - &array_as_number, /* tp_as_number */ - &array_as_sequence, /* tp_as_sequence */ - &array_as_mapping, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)array_str, /* tp_str */ - (getattrofunc)0, /* tp_getattro */ - (setattrofunc)0, /* tp_setattro */ - &array_as_buffer, /* tp_as_buffer */ + (destructor)array_dealloc, /* tp_dealloc */ + (printfunc)NULL, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ +#endif + (reprfunc)array_repr, /* tp_repr */ + 
&array_as_number, /* tp_as_number */ + &array_as_sequence, /* tp_as_sequence */ + &array_as_mapping, /* tp_as_mapping */ + (hashfunc)0, /* tp_hash */ + (ternaryfunc)0, /* tp_call */ + (reprfunc)array_str, /* tp_str */ + (getattrofunc)0, /* tp_getattro */ + (setattrofunc)0, /* tp_setattro */ + &array_as_buffer, /* tp_as_buffer */ (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE - | Py_TPFLAGS_CHECKTYPES), /* tp_flags */ - /*Documentation string */ - 0, /* tp_doc */ - - (traverseproc)0, /* tp_traverse */ - (inquiry)0, /* tp_clear */ - (richcmpfunc)array_richcompare, /* tp_richcompare */ - offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */ - - /* Iterator support (use standard) */ - - (getiterfunc)array_iter, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - - /* Sub-classing (new-style object) support */ - - array_methods, /* tp_methods */ - 0, /* tp_members */ - array_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - array_alloc, /* tp_alloc */ - (newfunc)array_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ -#endif + | Py_TPFLAGS_CHECKTYPES), /* tp_flags */ + 0, /* tp_doc */ + + (traverseproc)0, /* tp_traverse */ + (inquiry)0, /* tp_clear */ + (richcmpfunc)array_richcompare, /* tp_richcompare */ + offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */ + (getiterfunc)array_iter, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ + array_methods, /* tp_methods */ + 0, /* tp_members */ + array_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + array_alloc, /* tp_alloc */ + (newfunc)array_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 7080fbe7a..420fcea7d 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -2147,7 +2147,13 @@ VOID_nonzero (char *ip, PyArrayObject *ap) #undef __ALIGNED -/****************** compare **********************************/ +/* + ***************************************************************************** + ** COMPARISON FUNCTIONS ** + ***************************************************************************** + */ + +/* boolean type */ static int BOOL_compare(Bool *ip1, Bool *ip2, PyArrayObject *NPY_UNUSED(ap)) @@ -2155,48 +2161,144 @@ BOOL_compare(Bool *ip1, Bool *ip2, PyArrayObject *NPY_UNUSED(ap)) return (*ip1 ? (*ip2 ? 0 : 1) : (*ip2 ? 
-1 : 0)); } + +/* integer types */ + /**begin repeat -#fname=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,DATETIME,TIMEDELTA# -#type=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, datetime, timedelta# -*/ + * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, DATETIME, TIMEDELTA# + * #type = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, datetime, timedelta# + */ static int -@fname@_compare (@type@ *ip1, @type@ *ip2, PyArrayObject *NPY_UNUSED(ap)) +@TYPE@_compare (@type@ *pa, @type@ *pb, PyArrayObject *NPY_UNUSED(ap)) { - return *ip1 < *ip2 ? -1 : *ip1 == *ip2 ? 0 : 1; + const @type@ a = *pa; + const @type@ b = *pb; + + return a < b ? -1 : a == b ? 0 : 1; } /**end repeat**/ -/* compare imaginary part first, then complex if equal imaginary */ + +/* float types */ + +/* + * The real/complex comparison functions are compatible with the new sort + * order for nans introduced in numpy 1.4.0. All nan values now compare + * larger than non-nan values and are sorted to the end. The comparison + * order is: + * + * Real: [R, nan] + * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] + * + * where complex values with the same nan placements are sorted according + * to the non-nan part if it exists. If both the real and imaginary parts + * of complex types are non-nan the order is the same as the real parts + * unless they happen to be equal, in which case the order is that of the + * imaginary parts. + */ + /**begin repeat -#fname=CFLOAT, CDOUBLE, CLONGDOUBLE# -#type= float, double, longdouble# -*/ + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# + * #type = float, double, longdouble# + */ + +#define LT(a,b) ((a) < (b) || ((b) != (b) && (a) ==(a))) static int -@fname@_compare (@type@ *ip1, @type@ *ip2, PyArrayObject *NPY_UNUSED(ap)) +@TYPE@_compare(@type@ *pa, @type@ *pb) { - if (*ip1 == *ip2) { - return ip1[1]<ip2[1] ? -1 : (ip1[1] == ip2[1] ? 0 : 1); + const @type@ a = *pa; + const @type@ b = *pb; + int ret; + + if (LT(a,b)) { + ret = -1; + } + else if (LT(b,a)) { + ret = 1; + } + else { + ret = 0; + } + return ret; +} + + +static int +C@TYPE@_compare(@type@ *pa, @type@ *pb) +{ + const @type@ ar = pa[0]; + const @type@ ai = pa[1]; + const @type@ br = pb[0]; + const @type@ bi = pb[1]; + int ret; + + if (ar < br) { + if (ai == ai || bi != bi) { + ret = -1; + } + else { + ret = 1; + } + } + else if (br < ar) { + if (bi == bi || ai != ai) { + ret = 1; + } + else { + ret = -1; + } + } + else if (ar == br || (ar != ar && br != br)) { + if (LT(ai,bi)) { + ret = -1; + } + else if (LT(bi,ai)) { + ret = 1; + } + else { + ret = 0; + } + } + else if (ar == ar) { + ret = -1; } else { - return *ip1 < *ip2 ? 
-1 : 1; + ret = 1; } + + return ret; } - /**end repeat**/ + +#undef LT + +/**end repeat**/ + + +/* object type */ static int OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap)) { if ((*ip1 == NULL) || (*ip2 == NULL)) { - if (ip1 == ip2) return 1; - if (ip1 == NULL) return -1; + if (ip1 == ip2) { + return 1; + } + if (ip1 == NULL) { + return -1; + } return 1; } return PyObject_Compare(*ip1, *ip2); } + +/* string type */ + static int STRING_compare(char *ip1, char *ip2, PyArrayObject *ap) { @@ -2213,33 +2315,38 @@ STRING_compare(char *ip1, char *ip2, PyArrayObject *ap) return 0; } -/* taken from Python */ + +/* unicode type */ + static int UNICODE_compare(PyArray_UCS4 *ip1, PyArray_UCS4 *ip2, PyArrayObject *ap) { - int itemsize=ap->descr->elsize; - PyArray_UCS4 c1, c2; - - if (itemsize < 0) return 0; + int itemsize = ap->descr->elsize; + if (itemsize < 0) { + return 0; + } while(itemsize-- > 0) { - c1 = *ip1++; - c2 = *ip2++; - - if (c1 != c2) + PyArray_UCS4 c1 = *ip1++; + PyArray_UCS4 c2 = *ip2++; + if (c1 != c2) { return (c1 < c2) ? -1 : 1; + } } return 0; } -/* If fields are defined, then compare on first field and if equal - compare on second field. Continue until done or comparison results - in not_equal. - Must align data passed on to sub-comparisons. -*/ +/* void type */ +/* + * If fields are defined, then compare on first field and if equal + * compare on second field. Continue until done or comparison results + * in not_equal. + * + * Must align data passed on to sub-comparisons. + */ static int VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) { @@ -2247,17 +2354,18 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) PyObject *names, *key; PyObject *tup, *title; char *nip1, *nip2; - int i, offset, res=0; + int i, offset, res = 0; - if (!PyArray_HASFIELDS(ap)) + if (!PyArray_HASFIELDS(ap)) { return STRING_compare(ip1, ip2, ap); - + } descr = ap->descr; - /* Compare on the first-field. If equal, then - compare on the second-field, etc. + /* + * Compare on the first-field. If equal, then + * compare on the second-field, etc. 
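 *
 * [Illustrative aside, not part of this change.] For a structured
 * dtype such as dtype([('a', '<i4'), ('b', '<f8')]) -- a made-up
 * example -- two records compare the way Python compares tuples:
 * first on field 'a', and, only if those are equal, on field 'b'.
 * A field whose data is not suitably aligned is first copied into a
 * freshly allocated buffer before that field's own compare function
 * runs.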
*/ names = descr->names; - for (i=0; i<PyTuple_GET_SIZE(names); i++) { + for (i = 0; i < PyTuple_GET_SIZE(names); i++) { key = PyTuple_GET_ITEM(names, i); tup = PyDict_GetItem(descr->fields, key); if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, @@ -2271,15 +2379,18 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (((intp)(nip1) % new->alignment) != 0) { /* create buffer and copy */ nip1 = _pya_malloc(new->elsize); - if (nip1 == NULL) goto finish; + if (nip1 == NULL) { + goto finish; + } memcpy(nip1, ip1+offset, new->elsize); } if (((intp)(nip2) % new->alignment) != 0) { /* copy data to a buffer */ nip2 = _pya_malloc(new->elsize); if (nip2 == NULL) { - if (nip1 != ip1+offset) + if (nip1 != ip1+offset) { _pya_free(nip1); + } goto finish; } memcpy(nip2, ip2+offset, new->elsize); @@ -2294,7 +2405,9 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) _pya_free(nip2); } } - if (res != 0) break; + if (res != 0) { + break; + } } finish: diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 92723dbf1..e4873913a 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2978,16 +2978,16 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep) { PyArrayObject *ret; size_t nread = 0; - char *tmp; if (PyDataType_REFCHK(dtype)) { PyErr_SetString(PyExc_ValueError, - "cannot read into object array"); + "Cannot read into object array"); Py_DECREF(dtype); return NULL; } if (dtype->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "0-sized elements."); + PyErr_SetString(PyExc_ValueError, + "The elements are 0-sized."); Py_DECREF(dtype); return NULL; } @@ -2997,28 +2997,20 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep) else { if (dtype->f->scanfunc == NULL) { PyErr_SetString(PyExc_ValueError, - "don't know how to read " \ - "character files with that " \ - "array type"); + "Unable to read character files of that array type"); Py_DECREF(dtype); return NULL; } - ret = array_from_text(dtype, num, sep, &nread, - fp, - (next_element) fromfile_next_element, - (skip_separator) fromfile_skip_separator, - NULL); + ret = array_from_text(dtype, num, sep, &nread, fp, + (next_element) fromfile_next_element, + (skip_separator) fromfile_skip_separator, NULL); } if (((intp) nread) < num) { - fprintf(stderr, "%ld items requested but only %ld read\n", - (long) num, (long) nread); - /* Make sure realloc is > 0 */ - tmp = PyDataMem_RENEW(ret->data, - NPY_MAX(nread,1) * ret->descr->elsize); - /* FIXME: This should not raise a memory error when nread == 0 - We should return an empty array or at least raise an EOF Error. 
- */ - if ((tmp == NULL) || (nread == 0)) { + /* Realloc memory for smaller number of elements */ + const size_t nsize = NPY_MAX(nread,1)*ret->descr->elsize; + char *tmp; + + if((tmp = PyDataMem_RENEW(ret->data, nsize)) == NULL) { Py_DECREF(ret); return PyErr_NoMemory(); } diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index 909af2243..edcab8b09 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -2626,62 +2626,62 @@ static PyMappingMethods descr_as_mapping = { }; /****************** End of Mapping Protocol ******************************/ + NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.dtype", /* tp_name */ - sizeof(PyArray_Descr), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.dtype", /* tp_name */ + sizeof(PyArray_Descr), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraydescr_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - (reprfunc)arraydescr_repr, /* tp_repr */ - 0, /* tp_as_number */ - &descr_as_sequence, /* tp_as_sequence */ - &descr_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arraydescr_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - arraydescr_methods, /* tp_methods */ - arraydescr_members, /* tp_members */ - arraydescr_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arraydescr_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + (destructor)arraydescr_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + (void *)0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + (reprfunc)arraydescr_repr, /* tp_repr */ + 0, /* tp_as_number */ + &descr_as_sequence, /* tp_as_sequence */ + &descr_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)arraydescr_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + arraydescr_methods, /* tp_methods */ + arraydescr_members, /* tp_members */ + arraydescr_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arraydescr_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; diff --git 
a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c index 869ed613e..38121f910 100644 --- a/numpy/core/src/multiarray/flagsobject.c +++ b/numpy/core/src/multiarray/flagsobject.c @@ -553,61 +553,60 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNU } NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, + 0, /* ob_size */ +#endif "numpy.flagsobj", sizeof(PyArrayFlagsObject), - 0, /* tp_itemsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arrayflags_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - (cmpfunc)arrayflags_compare, /* tp_compare */ - (reprfunc)arrayflags_print, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &arrayflags_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arrayflags_print, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - arrayflags_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arrayflags_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + (destructor)arrayflags_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + (cmpfunc)arrayflags_compare, /* tp_compare */ #endif + (reprfunc)arrayflags_print, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + &arrayflags_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)arrayflags_print, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + arrayflags_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arrayflags_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; diff --git a/numpy/core/src/multiarray/global.c b/numpy/core/src/multiarray/global.c deleted file mode 100644 index 22306da23..000000000 --- a/numpy/core/src/multiarray/global.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "config.h" - -NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 6f25d9432..d6eac84ef 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -266,27 +266,33 @@ slice_GetIndices(PySliceObject *r, intp length, /* Aided 
by Peter J. Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ -/*NUMPY_API - * Get Iterator. - */ -NPY_NO_EXPORT PyObject * -PyArray_IterNew(PyObject *obj) +/* get the dataptr from its current coordinates for simple iterator */ +static char* +get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates) { - PyArrayIterObject *it; - int i, nd; - PyArrayObject *ao = (PyArrayObject *)obj; + npy_intp i; + char *ret; - if (!PyArray_Check(ao)) { - PyErr_BadInternalCall(); - return NULL; - } + ret = iter->ao->data; - it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ - if (it == NULL) { - return NULL; + for(i = 0; i < iter->ao->nd; ++i) { + ret += coordinates[i] * iter->strides[i]; } + + return ret; +} + +/* + * This is common initialization code between PyArrayIterObject and + * PyArrayNeighborhoodIterObject + * + * Increase ao refcount + */ +static PyObject * +array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao) +{ + int nd, i; + nd = ao->nd; PyArray_UpdateFlags(ao, CONTIGUOUS); if (PyArray_ISCONTIGUOUS(ao)) { @@ -307,12 +313,50 @@ PyArray_IterNew(PyObject *obj) if (i > 0) { it->factors[nd-i-1] = it->factors[nd-i] * ao->dimensions[nd-i]; } + it->bounds[i][0] = 0; + it->bounds[i][1] = ao->dimensions[i] - 1; + it->limits[i][0] = 0; + it->limits[i][1] = ao->dimensions[i] - 1; + it->limits_sizes[i] = it->limits[i][1] - it->limits[i][0] + 1; } + + it->translate = &get_ptr_simple; PyArray_ITER_RESET(it); return (PyObject *)it; } +static void +array_iter_base_dealloc(PyArrayIterObject *it) +{ + Py_XDECREF(it->ao); +} + +/*NUMPY_API + * Get Iterator. + */ +NPY_NO_EXPORT PyObject * +PyArray_IterNew(PyObject *obj) +{ + PyArrayIterObject *it; + PyArrayObject *ao = (PyArrayObject *)obj; + + if (!PyArray_Check(ao)) { + PyErr_BadInternalCall(); + return NULL; + } + + it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); + PyObject_Init((PyObject *)it, &PyArrayIter_Type); + /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ + if (it == NULL) { + return NULL; + } + + array_iter_base_init(it, ao); + return (PyObject *)it; +} + /*NUMPY_API * Get Iterator broadcast to a particular shape */ @@ -502,7 +546,7 @@ arrayiter_next(PyArrayIterObject *it) static void arrayiter_dealloc(PyArrayIterObject *it) { - Py_XDECREF(it->ao); + array_iter_base_dealloc(it); _pya_free(it); } @@ -1174,8 +1218,12 @@ iter_coords_get(PyArrayIterObject *self) int i; val = self->index; for (i = 0; i < nd; i++) { - self->coordinates[i] = val / self->factors[i]; - val = val % self->factors[i]; + if (self->factors[i] != 0) { + self->coordinates[i] = val / self->factors[i]; + val = val % self->factors[i]; + } else { + self->coordinates[i] = 0; + } } } return PyArray_IntTupleFromIntp(nd, self->coordinates); @@ -1189,63 +1237,62 @@ static PyGetSetDef iter_getsets[] = { }; NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.flatiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.flatiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arrayiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ 
- 0, /* tp_as_sequence */ - &iter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)iter_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arrayiter_next, /* tp_iternext */ - iter_methods, /* tp_methods */ - iter_members, /* tp_members */ - iter_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + (destructor)arrayiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif - + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + &iter_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)iter_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arrayiter_next, /* tp_iternext */ + iter_methods, /* tp_methods */ + iter_members, /* tp_members */ + iter_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; /** END of Array Iterator **/ @@ -1652,61 +1699,387 @@ static PyMethodDef arraymultiter_methods[] = { }; NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.broadcast", /* tp_name */ - sizeof(PyArrayMultiIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.broadcast", /* tp_name */ + sizeof(PyArrayMultiIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymultiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arraymultiter_next, /* tp_iternext */ - arraymultiter_methods, /* tp_methods */ - arraymultiter_members, /* tp_members */ - arraymultiter_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 
0, /* tp_alloc */ - arraymultiter_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + (destructor)arraymultiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ +#endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arraymultiter_next, /* tp_iternext */ + arraymultiter_methods, /* tp_methods */ + arraymultiter_members, /* tp_members */ + arraymultiter_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + arraymultiter_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ +}; + +/*========================= Neighborhood iterator ======================*/ + +static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter); + +static char* _set_constant(PyArrayNeighborhoodIterObject* iter, + PyArrayObject *fill) +{ + char *ret; + PyArrayIterObject *ar = iter->_internal_iter; + int storeflags, st; + + ret = PyDataMem_NEW(ar->ao->descr->elsize); + if (ret == NULL) { + PyErr_SetNone(PyExc_MemoryError); + return NULL; + } + + if (PyArray_ISOBJECT(ar->ao)) { + memcpy(ret, fill->data, sizeof(PyObject*)); + Py_INCREF(*(PyObject**)ret); + } else { + /* Non-object types */ + + storeflags = ar->ao->flags; + ar->ao->flags |= BEHAVED; + st = ar->ao->descr->f->setitem((PyObject*)fill, ret, ar->ao); + ar->ao->flags = storeflags; + + if (st < 0) { + PyDataMem_FREE(ret); + return NULL; + } + } + + return ret; +} + +#define _INF_SET_PTR(c) \ + bd = coordinates[c] + p->coordinates[c]; \ + if (bd < p->limits[c][0] || bd > p->limits[c][1]) { \ + return niter->constant; \ + } \ + _coordinates[c] = bd; + +/* set the dataptr from its current coordinates */ +static char* +get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates) +{ + int i; + npy_intp bd, _coordinates[NPY_MAXDIMS]; + PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter; + PyArrayIterObject *p = niter->_internal_iter; + + for(i = 0; i < niter->nd; ++i) { + _INF_SET_PTR(i) + } + + return p->translate(p, _coordinates); +} +#undef _INF_SET_PTR + +#define _NPY_IS_EVEN(x) ((x) % 2 == 0) + +/* For an array x of dimension n, and given index i, returns j, 0 <= j < n + * such as x[i] = x[j], with x assumed to be mirrored. 
For example, for x = + * {1, 2, 3} (n = 3) + * + * index -5 -4 -3 -2 -1 0 1 2 3 4 5 6 + * value 2 3 3 2 1 1 2 3 3 2 1 1 + * + * _npy_pos_index_mirror(4, 3) will return 1, because x[4] = x[1]*/ +static inline npy_intp +__npy_pos_remainder(npy_intp i, npy_intp n) +{ + npy_intp k, l, j; + + /* Mirror i such as it is guaranteed to be positive */ + if (i < 0) { + i = - i - 1; + } + + /* compute k and l such as i = k * n + l, 0 <= l < k */ + k = i / n; + l = i - k * n; + + if (_NPY_IS_EVEN(k)) { + j = l; + } else { + j = n - 1 - l; + } + return j; +} +#undef _NPY_IS_EVEN + +#define _INF_SET_PTR_MIRROR(c) \ + lb = p->limits[c][0]; \ + bd = coordinates[c] + p->coordinates[c] - lb; \ + _coordinates[c] = lb + __npy_pos_remainder(bd, p->limits_sizes[c]); + +/* set the dataptr from its current coordinates */ +static char* +get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates) +{ + int i; + npy_intp bd, _coordinates[NPY_MAXDIMS], lb; + PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter; + PyArrayIterObject *p = niter->_internal_iter; + + for(i = 0; i < niter->nd; ++i) { + _INF_SET_PTR_MIRROR(i) + } + + return p->translate(p, _coordinates); +} +#undef _INF_SET_PTR_MIRROR + +/* compute l such as i = k * n + l, 0 <= l < |k| */ +static inline npy_intp +__npy_euclidean_division(npy_intp i, npy_intp n) +{ + npy_intp l; + + l = i % n; + if (l < 0) { + l += n; + } + return l; +} + +#define _INF_SET_PTR_CIRCULAR(c) \ + lb = p->limits[c][0]; \ + bd = coordinates[c] + p->coordinates[c] - lb; \ + _coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]); + +static char* +get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) +{ + int i; + npy_intp bd, _coordinates[NPY_MAXDIMS], lb; + PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter; + PyArrayIterObject *p = niter->_internal_iter; + + for(i = 0; i < niter->nd; ++i) { + _INF_SET_PTR_CIRCULAR(i) + } + return p->translate(p, _coordinates); +} + +#undef _INF_SET_PTR_CIRCULAR + +/* + * fill and x->ao should have equivalent types + */ +/*NUMPY_API*/ +NPY_NO_EXPORT PyObject* +PyArray_NeighborhoodIterNew(PyArrayIterObject *x, intp *bounds, + int mode, PyArrayObject* fill) +{ + int i; + PyArrayNeighborhoodIterObject *ret; + + ret = _pya_malloc(sizeof(*ret)); + if (ret == NULL) { + return NULL; + } + PyObject_Init((PyObject *)ret, &PyArrayNeighborhoodIter_Type); + + array_iter_base_init((PyArrayIterObject*)ret, x->ao); + Py_INCREF(x); + ret->_internal_iter = x; + + ret->nd = x->ao->nd; + + for (i = 0; i < ret->nd; ++i) { + ret->dimensions[i] = x->ao->dimensions[i]; + } + + /* Compute the neighborhood size and copy the shape */ + ret->size = 1; + for (i = 0; i < ret->nd; ++i) { + ret->bounds[i][0] = bounds[2 * i]; + ret->bounds[i][1] = bounds[2 * i + 1]; + ret->size *= (ret->bounds[i][1] - ret->bounds[i][0]) + 1; + + /* limits keep track of valid ranges for the neighborhood: if a bound + * of the neighborhood is outside the array, then limits is the same as + * boundaries. On the contrary, if a bound is strictly inside the + * array, then limits correspond to the array range. For example, for + * an array [1, 2, 3], if bounds are [-1, 3], limits will be [-1, 3], + * but if bounds are [1, 2], then limits will be [0, 2]. + * + * This is used by neighborhood iterators stacked on top of this one */ + ret->limits[i][0] = ret->bounds[i][0] < 0 ? ret->bounds[i][0] : 0; + ret->limits[i][1] = ret->bounds[i][1] >= ret->dimensions[i] - 1 ? 
+ ret->bounds[i][1] : + ret->dimensions[i] - 1; + ret->limits_sizes[i] = (ret->limits[i][1] - ret->limits[i][0]) + 1; + } + + switch (mode) { + case NPY_NEIGHBORHOOD_ITER_ZERO_PADDING: + ret->constant = PyArray_Zero(x->ao); + ret->mode = mode; + ret->translate = &get_ptr_constant; + break; + case NPY_NEIGHBORHOOD_ITER_ONE_PADDING: + ret->constant = PyArray_One(x->ao); + ret->mode = mode; + ret->translate = &get_ptr_constant; + break; + case NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING: + /* New reference in returned value of _set_constant if array + * object */ + assert(PyArray_EquivArrTypes(x->ao, fill) == NPY_TRUE); + ret->constant = _set_constant(ret, fill); + if (ret->constant == NULL) { + goto clean_x; + } + ret->mode = mode; + ret->translate = &get_ptr_constant; + break; + case NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING: + ret->mode = mode; + ret->constant = NULL; + ret->translate = &get_ptr_mirror; + break; + case NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING: + ret->mode = mode; + ret->constant = NULL; + ret->translate = &get_ptr_circular; + break; + default: + PyErr_SetString(PyExc_ValueError, "Unsupported padding mode"); + goto clean_x; + } + + /* + * XXX: we force x iterator to be non contiguous because we need + * coordinates... Modifying the iterator here is not great + */ + x->contiguous = 0; + + PyArrayNeighborhoodIter_Reset(ret); + + return (PyObject*)ret; + +clean_x: + Py_DECREF(ret->_internal_iter); + array_iter_base_dealloc((PyArrayIterObject*)ret); + _pya_free((PyArrayObject*)ret); + return NULL; +} + +static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter) +{ + if (iter->mode == NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING) { + if (PyArray_ISOBJECT(iter->_internal_iter->ao)) { + Py_DECREF(*(PyObject**)iter->constant); + } + } + if (iter->constant != NULL) { + PyDataMem_FREE(iter->constant); + } + Py_DECREF(iter->_internal_iter); + + array_iter_base_dealloc((PyArrayIterObject*)iter); + _pya_free((PyArrayObject*)iter); +} + +NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else + PyObject_HEAD_INIT(NULL) + 0, /* ob_size */ +#endif + "numpy.neigh_internal_iter", /* tp_name*/ + sizeof(PyArrayNeighborhoodIterObject), /* tp_basicsize*/ + 0, /* tp_itemsize*/ + (destructor)neighiter_dealloc, /* tp_dealloc*/ + 0, /* tp_print*/ + 0, /* tp_getattr*/ + 0, /* tp_setattr*/ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash */ + 0, /* tp_call*/ + 0, /* tp_str*/ + 0, /* tp_getattro*/ + 0, /* tp_setattro*/ + 0, /* tp_as_buffer*/ + Py_TPFLAGS_DEFAULT, /* tp_flags*/ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index a1c787a97..389adf02f 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -22,9 +22,9 @@ static PyObject * 
array_subscript_simple(PyArrayObject *self, PyObject *op); -/************************************************************************* - **************** Implement Mapping Protocol *************************** - *************************************************************************/ +/****************************************************************************** + *** IMPLEMENT MAPPING PROTOCOL *** + *****************************************************************************/ NPY_NO_EXPORT Py_ssize_t array_length(PyArrayObject *self) @@ -1612,63 +1612,62 @@ arraymapiter_dealloc(PyArrayMapIterObject *mit) * slice syntax. */ NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.mapiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.mapiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymapiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + (destructor)arraymapiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; /** END of Subscript Iterator **/ diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 8e9bf24e4..de99ca137 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -767,6 +767,49 @@ array_wraparray(PyArrayObject *self, PyObject *args) return NULL; 
} arr = PyTuple_GET_ITEM(args, 0); + if (arr == NULL) { + return NULL; + } + if (!PyArray_Check(arr)) { + PyErr_SetString(PyExc_TypeError, + "can only be called with ndarray object"); + return NULL; + } + + if (self->ob_type != arr->ob_type){ + Py_INCREF(PyArray_DESCR(arr)); + ret = PyArray_NewFromDescr(self->ob_type, + PyArray_DESCR(arr), + PyArray_NDIM(arr), + PyArray_DIMS(arr), + PyArray_STRIDES(arr), PyArray_DATA(arr), + PyArray_FLAGS(arr), (PyObject *)self); + if (ret == NULL) { + return NULL; + } + Py_INCREF(arr); + PyArray_BASE(ret) = arr; + return ret; + } else { + /*The type was set in __array_prepare__*/ + Py_INCREF(arr); + return arr; + } +} + + +static PyObject * +array_preparearray(PyArrayObject *self, PyObject *args) +{ + PyObject *arr; + PyObject *ret; + + if (PyTuple_Size(args) < 1) { + PyErr_SetString(PyExc_TypeError, + "only accepts 1 argument"); + return NULL; + } + arr = PyTuple_GET_ITEM(args, 0); if (!PyArray_Check(arr)) { PyErr_SetString(PyExc_TypeError, "can only be called with ndarray object"); @@ -2031,6 +2074,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ {"__array__", (PyCFunction)array_getarray, METH_VARARGS, NULL}, + {"__array_prepare__", (PyCFunction)array_preparearray, + METH_VARARGS, NULL}, {"__array_wrap__", (PyCFunction)array_wraparray, METH_VARARGS, NULL}, diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src new file mode 100644 index 000000000..c09ccbc9d --- /dev/null +++ b/numpy/core/src/multiarray/multiarray_tests.c.src @@ -0,0 +1,390 @@ +#include <Python.h> +#include "numpy/ndarrayobject.h" + +/* + * TODO: + * - Handle mode + */ + +/**begin repeat + * #type = double, int# + * #typenum = NPY_DOUBLE, NPY_INT# + */ +static int copy_@type@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx, + npy_intp *bounds, + PyObject **out) +{ + npy_intp i, j; + @type@ *ptr; + npy_intp odims[NPY_MAXDIMS]; + PyArrayObject *aout; + + /* + * For each point in itx, copy the current neighborhood into an array which + * is appended at the output list + */ + for (i = 0; i < itx->size; ++i) { + PyArrayNeighborhoodIter_Reset(niterx); + + for (j = 0; j < itx->ao->nd; ++j) { + odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1; + } + aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, @typenum@); + if (aout == NULL) { + return -1; + } + + ptr = (@type@*)aout->data; + + for (j = 0; j < niterx->size; ++j) { + *ptr = *((@type@*)niterx->dataptr); + PyArrayNeighborhoodIter_Next(niterx); + ptr += 1; + } + + Py_INCREF(aout); + PyList_Append(*out, (PyObject*)aout); + Py_DECREF(aout); + PyArray_ITER_NEXT(itx); + } + + return 0; +} +/**end repeat**/ + +static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx, + npy_intp *bounds, + PyObject **out) +{ + npy_intp i, j; + npy_intp odims[NPY_MAXDIMS]; + PyArrayObject *aout; + PyArray_CopySwapFunc *copyswap = itx->ao->descr->f->copyswap; + npy_int itemsize = PyArray_ITEMSIZE(itx->ao); + + /* + * For each point in itx, copy the current neighborhood into an array which + * is appended at the output list + */ + for (i = 0; i < itx->size; ++i) { + PyArrayNeighborhoodIter_Reset(niterx); + + for (j = 0; j < itx->ao->nd; ++j) { + odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1; + } + aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, NPY_OBJECT); + if (aout == NULL) { + return -1; + } + + for (j = 0; j < niterx->size; ++j) { + copyswap(aout->data + j * itemsize, niterx->dataptr, 0, NULL); + 
PyArrayNeighborhoodIter_Next(niterx); + } + + Py_INCREF(aout); + PyList_Append(*out, (PyObject*)aout); + Py_DECREF(aout); + PyArray_ITER_NEXT(itx); + } + + return 0; +} + +static PyObject* +test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args) +{ + PyObject *x, *fill, *out, *b; + PyArrayObject *ax, *afill; + PyArrayIterObject *itx; + int i, typenum, mode, st; + npy_intp bounds[NPY_MAXDIMS*2]; + PyArrayNeighborhoodIterObject *niterx; + + if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) { + return NULL; + } + + if (!PySequence_Check(b)) { + return NULL; + } + + typenum = PyArray_ObjectType(x, 0); + typenum = PyArray_ObjectType(fill, typenum); + + ax = (PyArrayObject*)PyArray_FromObject(x, typenum, 1, 10); + if (ax == NULL) { + return NULL; + } + if (PySequence_Size(b) != 2 * ax->nd) { + PyErr_SetString(PyExc_ValueError, + "bounds sequence size not compatible with x input"); + goto clean_ax; + } + + out = PyList_New(0); + if (out == NULL) { + goto clean_ax; + } + + itx = (PyArrayIterObject*)PyArray_IterNew(x); + if (itx == NULL) { + goto clean_out; + } + + /* Compute boundaries for the neighborhood iterator */ + for (i = 0; i < 2 * ax->nd; ++i) { + PyObject* bound; + bound = PySequence_GetItem(b, i); + if (bounds == NULL) { + goto clean_itx; + } + if (!PyInt_Check(bound)) { + PyErr_SetString(PyExc_ValueError, "bound not long"); + Py_DECREF(bound); + goto clean_itx; + } + bounds[i] = PyInt_AsLong(bound); + Py_DECREF(bound); + } + + /* Create the neighborhood iterator */ + afill = NULL; + if (mode == NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING) { + afill = (PyArrayObject *)PyArray_FromObject(fill, typenum, 0, 0); + if (afill == NULL) { + goto clean_itx; + } + } + + niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( + (PyArrayIterObject*)itx, bounds, mode, afill); + if (niterx == NULL) { + goto clean_afill; + } + + switch (typenum) { + case NPY_OBJECT: + st = copy_object(itx, niterx, bounds, &out); + break; + case NPY_INT: + st = copy_int(itx, niterx, bounds, &out); + break; + case NPY_DOUBLE: + st = copy_double(itx, niterx, bounds, &out); + break; + default: + PyErr_SetString(PyExc_ValueError, "Type not supported"); + goto clean_niterx; + } + + if (st) { + goto clean_niterx; + } + + Py_DECREF(niterx); + Py_XDECREF(afill); + Py_DECREF(itx); + + Py_DECREF(ax); + + return out; + +clean_niterx: + Py_DECREF(niterx); +clean_afill: + Py_XDECREF(afill); +clean_itx: + Py_DECREF(itx); +clean_out: + Py_DECREF(out); +clean_ax: + Py_DECREF(ax); + return NULL; +} + +static int +copy_double_double(PyArrayNeighborhoodIterObject *itx, + PyArrayNeighborhoodIterObject *niterx, + npy_intp *bounds, + PyObject **out) +{ + npy_intp i, j; + double *ptr; + npy_intp odims[NPY_MAXDIMS]; + PyArrayObject *aout; + + /* + * For each point in itx, copy the current neighborhood into an array which + * is appended at the output list + */ + PyArrayNeighborhoodIter_Reset(itx); + for (i = 0; i < itx->size; ++i) { + for (j = 0; j < itx->ao->nd; ++j) { + odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1; + } + aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, NPY_DOUBLE); + if (aout == NULL) { + return -1; + } + + ptr = (double*)aout->data; + + PyArrayNeighborhoodIter_Reset(niterx); + for (j = 0; j < niterx->size; ++j) { + *ptr = *((double*)niterx->dataptr); + ptr += 1; + PyArrayNeighborhoodIter_Next(niterx); + } + Py_INCREF(aout); + PyList_Append(*out, (PyObject*)aout); + Py_DECREF(aout); + PyArrayNeighborhoodIter_Next(itx); + } + return 0; +} + +static PyObject* 
+test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args) +{ + PyObject *x, *out, *b1, *b2; + PyArrayObject *ax; + PyArrayIterObject *itx; + int i, typenum, mode1, mode2, st; + npy_intp bounds[NPY_MAXDIMS*2]; + PyArrayNeighborhoodIterObject *niterx1, *niterx2; + + if (!PyArg_ParseTuple(args, "OOiOi", &x, &b1, &mode1, &b2, &mode2)) { + return NULL; + } + + if (!PySequence_Check(b1) || !PySequence_Check(b2)) { + return NULL; + } + + typenum = PyArray_ObjectType(x, 0); + + ax = (PyArrayObject*)PyArray_FromObject(x, typenum, 1, 10); + if (ax == NULL) { + return NULL; + } + if (PySequence_Size(b1) != 2 * ax->nd) { + PyErr_SetString(PyExc_ValueError, + "bounds sequence 1 size not compatible with x input"); + goto clean_ax; + } + if (PySequence_Size(b2) != 2 * ax->nd) { + PyErr_SetString(PyExc_ValueError, + "bounds sequence 2 size not compatible with x input"); + goto clean_ax; + } + + out = PyList_New(0); + if (out == NULL) { + goto clean_ax; + } + + itx = (PyArrayIterObject*)PyArray_IterNew(x); + if (itx == NULL) { + goto clean_out; + } + + /* Compute boundaries for the neighborhood iterator */ + for (i = 0; i < 2 * ax->nd; ++i) { + PyObject* bound; + bound = PySequence_GetItem(b1, i); + if (bounds == NULL) { + goto clean_itx; + } + if (!PyInt_Check(bound)) { + PyErr_SetString(PyExc_ValueError, "bound not long"); + Py_DECREF(bound); + goto clean_itx; + } + bounds[i] = PyInt_AsLong(bound); + Py_DECREF(bound); + } + + /* Create the neighborhood iterator */ + niterx1 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( + (PyArrayIterObject*)itx, bounds, + mode1, NULL); + if (niterx1 == NULL) { + goto clean_out; + } + + for (i = 0; i < 2 * ax->nd; ++i) { + PyObject* bound; + bound = PySequence_GetItem(b2, i); + if (bounds == NULL) { + goto clean_itx; + } + if (!PyInt_Check(bound)) { + PyErr_SetString(PyExc_ValueError, "bound not long"); + Py_DECREF(bound); + goto clean_itx; + } + bounds[i] = PyInt_AsLong(bound); + Py_DECREF(bound); + } + + niterx2 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( + (PyArrayIterObject*)niterx1, bounds, + mode2, NULL); + if (niterx1 == NULL) { + goto clean_niterx1; + } + + switch (typenum) { + case NPY_DOUBLE: + st = copy_double_double(niterx1, niterx2, bounds, &out); + break; + default: + PyErr_SetString(PyExc_ValueError, "Type not supported"); + goto clean_niterx2; + } + + if (st) { + goto clean_niterx2; + } + + Py_DECREF(niterx2); + Py_DECREF(niterx1); + Py_DECREF(itx); + Py_DECREF(ax); + return out; + +clean_niterx2: + Py_DECREF(niterx2); +clean_niterx1: + Py_DECREF(niterx1); +clean_itx: + Py_DECREF(itx); +clean_out: + Py_DECREF(out); +clean_ax: + Py_DECREF(ax); + return NULL; +} + +static PyMethodDef Multiarray_TestsMethods[] = { + {"test_neighborhood_iterator", test_neighborhood_iterator, METH_VARARGS, NULL}, + {"test_neighborhood_iterator_oob", test_neighborhood_iterator_oob, METH_VARARGS, NULL}, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +PyMODINIT_FUNC +initmultiarray_tests(void) +{ + PyObject *m; + + m = Py_InitModule("multiarray_tests", Multiarray_TestsMethods); + if (m == NULL) { + return; + } + import_array(); + if (PyErr_Occurred()) { + PyErr_SetString(PyExc_RuntimeError, + "cannot load umath_tests module."); + } +} diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 6f9c34313..a793e0364 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -27,7 +27,7 @@ #include "config.h" -#include "global.c" 
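A review note on the new test module above: in each bounds-parsing loop the NULL check tests ``bounds`` (the stack array, which is never NULL) where the freshly fetched ``bound`` is presumably meant; the second iterator constructor's result is checked through ``niterx1`` instead of ``niterx2``; and the failure message in ``initmultiarray_tests`` names umath_tests rather than multiarray_tests. The ``Py_INCREF`` before each ``PyList_Append`` in the copy helpers also leaks one reference per appended array, since ``PyList_Append`` adds its own reference. A corrected sketch of the parsing loop, keeping the surrounding variable names (``PyInt_*`` is the py2k int API used above):

    /* Corrected sketch of the bounds-parsing loop above. */
    for (i = 0; i < 2 * ax->nd; ++i) {
        PyObject *bound = PySequence_GetItem(b, i);
        if (bound == NULL) {                /* was: bounds == NULL */
            goto clean_itx;
        }
        if (!PyInt_Check(bound)) {
            PyErr_SetString(PyExc_ValueError, "bound not long");
            Py_DECREF(bound);
            goto clean_itx;
        }
        bounds[i] = PyInt_AsLong(bound);
        Py_DECREF(bound);
    }

For the list appends, creating the array, appending it, and then dropping the creation reference is enough; no extra ``Py_INCREF`` is needed.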
+NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #define PyAO PyArrayObject @@ -906,7 +906,7 @@ PyArray_CopyAndTranspose(PyObject *op) } /* - * Implementation which is common between PyArray_Correlate and PyArray_Acorrelate + * Implementation which is common between PyArray_Correlate and PyArray_Correlate2 * * inverted is set to 1 if computed correlate(ap2, ap1), 0 otherwise */ @@ -1065,13 +1065,13 @@ _pyarray_revert(PyArrayObject *ret) } /*NUMPY_API - * acorrelate(a1,a2,mode) + * correlate(a1,a2,mode) * - * This function computes the usual correlation (acorrelate(a1, a2) != - * accorrelate(a2, a1), and conjugate the second argument for complex inputs + * This function computes the usual correlation (correlate(a1, a2) != + * correlate(a2, a1), and conjugate the second argument for complex inputs */ NPY_NO_EXPORT PyObject * -PyArray_Acorrelate(PyObject *op1, PyObject *op2, int mode) +PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) { PyArrayObject *ap1, *ap2, *ret = NULL; int typenum; @@ -1796,7 +1796,7 @@ static PyObject *array_correlate(PyObject *NPY_UNUSED(dummy), PyObject *args, Py } static PyObject* -array_acorrelate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +array_correlate2(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *shape, *a0; int mode = 0; @@ -1806,7 +1806,7 @@ array_acorrelate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) &a0, &shape, &mode)) { return NULL; } - return PyArray_Acorrelate(a0, shape, mode); + return PyArray_Correlate2(a0, shape, mode); } static PyObject * @@ -2448,8 +2448,8 @@ static struct PyMethodDef array_module_methods[] = { {"correlate", (PyCFunction)array_correlate, METH_VARARGS | METH_KEYWORDS, NULL}, - {"acorrelate", - (PyCFunction)array_acorrelate, + {"correlate2", + (PyCFunction)array_correlate2, METH_VARARGS | METH_KEYWORDS, NULL}, {"frombuffer", (PyCFunction)array_frombuffer, @@ -2710,6 +2710,11 @@ PyMODINIT_FUNC initmultiarray(void) { if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { return; } + PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; + if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { + return; + } + PyArrayDescr_Type.tp_hash = PyArray_DescrHash; if (PyType_Ready(&PyArrayDescr_Type) < 0) { return; diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 28ba7f47a..e50106866 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -37,63 +37,62 @@ NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[] = { * Floating, ComplexFloating, Flexible, Character, TimeInteger# */ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size*/ - "numpy.@name@", /* tp_name*/ - sizeof(PyObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.@name@", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods 
*/ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; /**end repeat**/ @@ -1805,63 +1804,62 @@ static PyBufferProcs gentype_as_buffer = { #define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size*/ - "numpy.generic", /* tp_name*/ - sizeof(PyObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size */ +#endif + "numpy.generic", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ 
+ 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; static void @@ -2610,61 +2608,61 @@ object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds) } NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size*/ - "numpy.object_", /* tp_name*/ - sizeof(PyObjectScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - (destructor)object_arrtype_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &object_arrtype_as_sequence, /* tp_as_sequence */ - &object_arrtype_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)object_arrtype_call, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)object_arrtype_getattro, /* tp_getattro */ - (setattrofunc)object_arrtype_setattro, /* tp_setattro */ - &object_arrtype_as_buffer, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* ob_size */ #endif + "numpy.object_", /* tp_name*/ + sizeof(PyObjectScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + (destructor)object_arrtype_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ +#endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + &object_arrtype_as_sequence, /* tp_as_sequence */ + &object_arrtype_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + (ternaryfunc)object_arrtype_call, /* tp_call */ + 0, /* tp_str */ + (getattrofunc)object_arrtype_getattro, /* tp_getattro */ + (setattrofunc)object_arrtype_setattro, /* tp_setattro */ + &object_arrtype_as_buffer, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; static 
PyObject * @@ -2714,61 +2712,61 @@ gen_arrtype_subscript(PyObject *self, PyObject *key) * #ex = _,_,_,# */ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size*/ - "numpy.@name@@ex@", /* tp_name*/ - sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* ob_size */ #endif + "numpy.@name@@ex@", /* tp_name*/ + sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ +#endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; /**end repeat**/ @@ -2797,61 +2795,61 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { #define _THIS_SIZE "256" #endif NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else PyObject_HEAD_INIT(NULL) - 0, /* ob_size*/ - "numpy.@name@" _THIS_SIZE, /* tp_name*/ - sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* 
tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* ob_size */ +#endif + "numpy.@name@" _THIS_SIZE, /* tp_name*/ + sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; #undef _THIS_SIZE @@ -2895,62 +2893,62 @@ static PyMappingMethods gentype_as_mapping = { #define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats" - NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size*/ - "numpy.@name@" _THIS_SIZE1, /* tp_name*/ - sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize*/ - 0, /* tp_dealloc*/ - 0, /* tp_print*/ - 0, /* tp_getattr*/ - 0, /* tp_setattr*/ - 0, /* tp_compare*/ - 0, /* tp_repr*/ - 0, /* tp_as_number*/ - 0, /* tp_as_sequence*/ - 0, /* tp_as_mapping*/ - 0, /* tp_hash */ - 0, /* tp_call*/ - 0, /* tp_str*/ - 0, /* tp_getattro*/ - 0, /* tp_setattro*/ - 0, /* tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /* tp_flags*/ - _THIS_DOC, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ +NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(0, 0) +#else + PyObject_HEAD_INIT(0) + 0, /* ob_size */ +#endif + "numpy.@name@" _THIS_SIZE1, /* tp_name*/ + sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize*/ + 0, /* tp_dealloc*/ + 0, /* tp_print*/ + 0, /* tp_getattr*/ + 0, /* tp_setattr*/ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* 
tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash */ + 0, /* tp_call*/ + 0, /* tp_str*/ + 0, /* tp_getattro*/ + 0, /* tp_setattro*/ + 0, /* tp_as_buffer*/ + Py_TPFLAGS_DEFAULT, /* tp_flags*/ + _THIS_DOC, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; #undef _THIS_SIZE1 #undef _THIS_SIZE2 diff --git a/numpy/core/src/_signbit.c b/numpy/core/src/npymath/_signbit.c index a2ad38162..a2ad38162 100644 --- a/numpy/core/src/_signbit.c +++ b/numpy/core/src/npymath/_signbit.c diff --git a/numpy/core/src/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src index 21fc7d427..3fde802a2 100644 --- a/numpy/core/src/npy_math.c.src +++ b/numpy/core/src/npymath/npy_math.c.src @@ -40,6 +40,18 @@ * #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM * double exp(double); * #endif + * + * Some of the code is taken from msun library in FreeBSD, with the following + * notice: + * + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== */ #include <Python.h> @@ -48,6 +60,8 @@ #include "config.h" #include "numpy/npy_math.h" +#include "npy_math_private.h" + /* ***************************************************************************** ** BASIC MATH FUNCTIONS ** @@ -56,61 +70,158 @@ /* Original code by Konrad Hinsen. */ #ifndef HAVE_EXPM1 -static double expm1(double x) +double npy_expm1(double x) { - double u = exp(x); + double u = npy_exp(x); if (u == 1.0) { return x; } else if (u-1.0 == -1.0) { return -1; } else { - return (u-1.0) * x/log(u); + return (u-1.0) * x/npy_log(u); } } #endif #ifndef HAVE_LOG1P -static double log1p(double x) +double npy_log1p(double x) { double u = 1. + x; if (u == 1.0) { return x; } else { - return log(u) * x / (u - 1); + return npy_log(u) * x / (u - 1); } } #endif +/* Taken from FreeBSD mlib, adapted for numpy + * + * XXX: we could be a bit faster by reusing high/low words for inf/nan + * classification instead of calling npy_isinf/npy_isnan: we should have some + * macros for this, though, instead of doing it manually + */ +#ifndef HAVE_ATAN2 +/* XXX: we should have this in npy_math.h */ +#define NPY_DBL_EPSILON 1.2246467991473531772E-16 +double npy_atan2(double y, double x) +{ + npy_int32 k, m, iy, ix, hx, hy; + npy_uint32 lx,ly; + double z; + + EXTRACT_WORDS(hx, lx, x); + ix = hx & 0x7fffffff; + EXTRACT_WORDS(hy, ly, y); + iy = hy & 0x7fffffff; + + /* if x or y is nan, return nan */ + if (npy_isnan(x * y)) { + return x + y; + } + + if (x == 1.0) { + return npy_atan(y); + } + + m = 2 * npy_signbit(x) + npy_signbit(y); + if (y == 0.0) { + switch(m) { + case 0: + case 1: return y; /* atan(+-0,+anything)=+-0 */ + case 2: return NPY_PI;/* atan(+0,-anything) = pi */ + case 3: return -NPY_PI;/* atan(-0,-anything) =-pi */ + } + } + + if (x == 0.0) { + return y > 0 ? 
NPY_PI_2 : -NPY_PI_2; + } + + if (npy_isinf(x)) { + if (npy_isinf(y)) { + switch(m) { + case 0: return NPY_PI_4;/* atan(+INF,+INF) */ + case 1: return -NPY_PI_4;/* atan(-INF,+INF) */ + case 2: return 3.0*NPY_PI_4;/*atan(+INF,-INF)*/ + case 3: return -3.0*NPY_PI_4;/*atan(-INF,-INF)*/ + } + } else { + switch(m) { + case 0: return NPY_PZERO; /* atan(+...,+INF) */ + case 1: return NPY_NZERO; /* atan(-...,+INF) */ + case 2: return NPY_PI; /* atan(+...,-INF) */ + case 3: return -NPY_PI; /* atan(-...,-INF) */ + } + } + } + + if (npy_isinf(y)) { + return y > 0 ? NPY_PI_2 : -NPY_PI_2; + } + + /* compute y/x */ + k = (iy - ix)>>20; + if(k > 60) { /* |y/x| > 2**60 */ + z = NPY_PI_2 + 0.5 * NPY_DBL_EPSILON; + m &= 1; + } else if(hx < 0 && k < -60) { + z = 0.0; /* 0 > |y|/x > -2**-60 */ + } else { + z = npy_atan(npy_fabs(y/x)); /* safe to do y/x */ + } + + switch (m) { + case 0: return z ; /* atan(+,+) */ + case 1: return -z ; /* atan(-,+) */ + case 2: return NPY_PI - (z - NPY_DBL_EPSILON);/* atan(+,-) */ + default: /* case 3 */ + return (z - NPY_DBL_EPSILON) - NPY_PI;/* atan(-,-) */ + } +} + +#endif + #ifndef HAVE_HYPOT -static double hypot(double x, double y) +double npy_hypot(double x, double y) { double yx; - x = fabs(x); - y = fabs(y); + /* Handle the case where x or y is a NaN */ + if (npy_isnan(x * y)) { + if (npy_isinf(x) || npy_isinf(y)) { + return NPY_INFINITY; + } else { + return NPY_NAN; + } + } + + x = npy_fabs(x); + y = npy_fabs(y); if (x < y) { double temp = x; x = y; y = temp; } - if (x == 0.) + if (x == 0.) { return 0.; + } else { yx = y/x; - return x*sqrt(1.+yx*yx); + return x*npy_sqrt(1.+yx*yx); } } #endif #ifndef HAVE_ACOSH -static double acosh(double x) +double npy_acosh(double x) { - return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); + return 2*npy_log(npy_sqrt((x+1.0)/2)+npy_sqrt((x-1.0)/2)); } #endif #ifndef HAVE_ASINH -static double asinh(double xx) +double npy_asinh(double xx) { double x, d; int sign; @@ -125,37 +236,37 @@ static double asinh(double xx) if (x > 1e8) { d = x; } else { - d = sqrt(x*x + 1); + d = npy_sqrt(x*x + 1); } - return sign*log1p(x*(1.0 + x/(d+1))); + return sign*npy_log1p(x*(1.0 + x/(d+1))); } #endif #ifndef HAVE_ATANH -static double atanh(double x) +double npy_atanh(double x) { if (x > 0) { - return -0.5*log1p(-2.0*x/(1.0 + x)); + return -0.5*npy_log1p(-2.0*x/(1.0 + x)); } else { - return 0.5*log1p(2.0*x/(1.0 - x)); + return 0.5*npy_log1p(2.0*x/(1.0 - x)); } } #endif #ifndef HAVE_RINT -static double rint(double x) +double npy_rint(double x) { double y, r; - y = floor(x); + y = npy_floor(x); r = x - y; if (r > 0.5) goto rndup; /* Round to nearest even */ if (r==0.5) { - r = y - 2.0*floor(0.5*y); + r = y - 2.0*npy_floor(0.5*y); if (r==1.0) { rndup: y+=1.0; @@ -166,30 +277,41 @@ static double rint(double x) #endif #ifndef HAVE_TRUNC -static double trunc(double x) +double npy_trunc(double x) { - return x < 0 ? ceil(x) : floor(x); + return x < 0 ? 
npy_ceil(x) : npy_floor(x); } #endif #ifndef HAVE_EXP2 #define LOG2 0.69314718055994530943 -static double exp2(double x) +double npy_exp2(double x) { - return exp(LOG2*x); + return npy_exp(LOG2*x); } #undef LOG2 #endif #ifndef HAVE_LOG2 #define INVLOG2 1.4426950408889634074 -static double log2(double x) +double npy_log2(double x) { - return INVLOG2*log(x); + return INVLOG2*npy_log(x); } #undef INVLOG2 #endif +#ifndef HAVE_COPYSIGN +double npy_copysign(double x, double y) +{ + npy_uint32 hx,hy; + GET_HIGH_WORD(hx,x); + GET_HIGH_WORD(hy,y); + SET_HIGH_WORD(x,(hx&0x7fffffff)|(hy&0x80000000)); + return x; +} +#endif + /* ***************************************************************************** ** IEEE 754 FPU HANDLING ** @@ -247,25 +369,25 @@ int _npy_signbit_ld (long double x) #undef @kind@@c@ #endif #ifndef HAVE_@KIND@@C@ -static @type@ @kind@@c@(@type@ x) +@type@ npy_@kind@@c@(@type@ x) { - return (@type@) @kind@((double)x); + return (@type@) npy_@kind@((double)x); } #endif /**end repeat1**/ /**begin repeat1 - * #kind = atan2,hypot,pow,fmod# - * #KIND = ATAN2,HYPOT,POW,FMOD# + * #kind = atan2,hypot,pow,fmod,copysign# + * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN# */ #ifdef @kind@@c@ #undef @kind@@c@ #endif #ifndef HAVE_@KIND@@C@ -static @type@ @kind@@c@(@type@ x, @type@ y) +@type@ npy_@kind@@c@(@type@ x, @type@ y) { - return (@type@) @kind@((double)x, (double) y); + return (@type@) npy_@kind@((double)x, (double) y); } #endif /**end repeat1**/ @@ -274,10 +396,10 @@ static @type@ @kind@@c@(@type@ x, @type@ y) #undef modf@c@ #endif #ifndef HAVE_MODF@C@ -static @type@ modf@c@(@type@ x, @type@ *iptr) +@type@ npy_modf@c@(@type@ x, @type@ *iptr) { double niptr; - double y = modf((double)x, &niptr); + double y = npy_modf((double)x, &niptr); *iptr = (@type@) niptr; return (@type@) y; } @@ -285,27 +407,6 @@ static @type@ modf@c@(@type@ x, @type@ *iptr) /**end repeat**/ -/* - * Useful constants in three precisions: - * XXX: those should really be in the header - */ - -/**begin repeat - * #c = f, ,l# - * #C = F, ,L# - */ -#define NPY_E@c@ 2.7182818284590452353602874713526625@C@ /* e */ -#define NPY_LOG2E@c@ 1.4426950408889634073599246810018921@C@ /* log_2 e */ -#define NPY_LOG10E@c@ 0.4342944819032518276511289189166051@C@ /* log_10 e */ -#define NPY_LOGE2@c@ 0.6931471805599453094172321214581766@C@ /* log_e 2 */ -#define NPY_LOGE10@c@ 2.3025850929940456840179914546843642@C@ /* log_e 10 */ -#define NPY_PI@c@ 3.1415926535897932384626433832795029@C@ /* pi */ -#define NPY_PI_2@c@ 1.5707963267948966192313216916397514@C@ /* pi/2 */ -#define NPY_PI_4@c@ 0.7853981633974483096156608458198757@C@ /* pi/4 */ -#define NPY_1_PI@c@ 0.3183098861837906715377675267450287@C@ /* 1/pi */ -#define NPY_2_PI@c@ 0.6366197723675813430755350534900574@C@ /* 2/pi */ -/**end repeat**/ - /* * Non standard functions */ @@ -321,17 +422,17 @@ static @type@ modf@c@(@type@ x, @type@ *iptr) #define RAD2DEG (180.0@c@/NPY_PI@c@) #define DEG2RAD (NPY_PI@c@/180.0@c@) -static @type@ rad2deg@c@(@type@ x) +@type@ npy_rad2deg@c@(@type@ x) { return x*RAD2DEG; } -static @type@ deg2rad@c@(@type@ x) +@type@ npy_deg2rad@c@(@type@ x) { return x*DEG2RAD; } -static @type@ log2_1p@c@(@type@ x) +@type@ npy_log2_1p@c@(@type@ x) { @type@ u = 1 + x; if (u == 1) { @@ -341,9 +442,9 @@ static @type@ log2_1p@c@(@type@ x) } } -static @type@ exp2_1m@c@(@type@ x) +@type@ npy_exp2_1m@c@(@type@ x) { - @type@ u = exp@c@(x); + @type@ u = npy_exp@c@(x); if (u == 1.0) { return LOGE2*x; } else if (u - 1 == -1) { @@ -353,31 +454,36 @@ static @type@ exp2_1m@c@(@type@ x) } } 
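The log2_1p/exp2_1m pair above, like npy_log1p earlier, relies on a standard floating-point trick: when x is tiny, 1 + x rounds to exactly 1, and the factor x/(u - 1) cancels that rounding error back out of log(u) (exp2_1m applies the same idea in the other direction via (u - 1)*x/log(u)). A minimal stand-alone illustration, written for exposition only and not taken from the numpy sources:

    #include <math.h>
    #include <stdio.h>

    /* Sketch of the log1p trick: for tiny x, 1 + x rounds to exactly 1,
     * and log(u) * x / (u - 1) compensates the rounding error in u. */
    static double demo_log1p(double x)
    {
        double u = 1.0 + x;
        if (u == 1.0) {
            return x;               /* log(1+x) ~= x below machine epsilon */
        }
        return log(u) * x / (u - 1.0);
    }

    int main(void)
    {
        double x = 1e-18;
        printf("naive : %.17g\n", log(1.0 + x));   /* prints 0: 1+x rounded to 1 */
        printf("stable: %.17g\n", demo_log1p(x));  /* prints ~1e-18 */
        return 0;
    }

The npy_logaddexp/npy_logaddexp2 changes that follow build on these same primitives, adding an explicit third branch so that nans, and infinities of the same sign, fall through to x + y instead of producing spurious values.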
-static @type@ logaddexp@c@(@type@ x, @type@ y) +@type@ npy_logaddexp@c@(@type@ x, @type@ y) { const @type@ tmp = x - y; if (tmp > 0) { return x + npy_log1p@c@(npy_exp@c@(-tmp)); } - else { + else if (tmp <= 0) { return y + npy_log1p@c@(npy_exp@c@(tmp)); } + else { + /* NaNs, or infinities of the same sign involved */ + return x + y; + } } -static @type@ logaddexp2@c@(@type@ x, @type@ y) +@type@ npy_logaddexp2@c@(@type@ x, @type@ y) { const @type@ tmp = x - y; if (tmp > 0) { - return x + log2_1p@c@(npy_exp2@c@(-tmp)); + return x + npy_log2_1p@c@(npy_exp2@c@(-tmp)); + } + else if (tmp <= 0) { + return y + npy_log2_1p@c@(npy_exp2@c@(tmp)); } else { - return y + log2_1p@c@(npy_exp2@c@(tmp)); + /* NaNs, or infinities of the same sign involved */ + return x + y; } } -#define degrees@c@ rad2deg@c@ -#define radians@c@ deg2rad@c@ - #undef LOGE2 #undef LOG2E #undef RAD2DEG @@ -386,38 +492,46 @@ static @type@ logaddexp2@c@(@type@ x, @type@ y) /**end repeat**/ /* - * Decorate all the functions: those are the public ones + * Decorate all the math functions which are available on the current platform */ /**begin repeat * #type = npy_longdouble,double,float# * #c = l,,f# + * #C = L,,F# */ /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2, - * rad2deg,deg2rad,exp2_1m# + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, + * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# */ - +#ifdef HAVE_@KIND@@C@ @type@ npy_@kind@@c@(@type@ x) { return @kind@@c@(x); } +#endif /**end repeat1**/ /**begin repeat1 - * #kind = atan2,hypot,pow,fmod,logaddexp,logaddexp2# + * #kind = atan2,hypot,pow,fmod,copysign# + * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN# */ +#ifdef HAVE_@KIND@@C@ @type@ npy_@kind@@c@(@type@ x, @type@ y) { return @kind@@c@(x, y); } +#endif /**end repeat1**/ +#ifdef HAVE_MODF@C@ @type@ npy_modf@c@(@type@ x, @type@ *iptr) { return modf@c@(x, iptr); } +#endif /**end repeat**/ diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h new file mode 100644 index 000000000..ea7c47fe8 --- /dev/null +++ b/numpy/core/src/npymath/npy_math_private.h @@ -0,0 +1,121 @@ +/* + * + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/* + * from: @(#)fdlibm.h 5.1 93/09/24 + * $FreeBSD$ + */ + +#ifndef _NPY_MATH_PRIVATE_H_ +#define _NPY_MATH_PRIVATE_H_ + +#include <numpy/npy_endian.h> + +/* + * The original fdlibm code used statements like: + * n0 = ((*(int*)&one)>>29)^1; * index of high word * + * ix0 = *(n0+(int*)&x); * high word of x * + * ix1 = *((1-n0)+(int*)&x); * low word of x * + * to dig two 32 bit words out of the 64 bit IEEE floating point + * value. That is non-ANSI, and, moreover, the gcc instruction + * scheduler gets it wrong. We instead use the following macros. + * Unlike the original code, we determine the endianness at compile + * time, not at run time; I don't see much benefit to selecting + * endianness at run time. + */ + +/* + * A union which permits us to convert between a double and two 32 bit + * ints. 
+ */
+
+/* XXX: not really, but we already make this assumption elsewhere. Will have to
+ * fix this at some point */
+#define IEEE_WORD_ORDER NPY_BYTE_ORDER
+
+#if IEEE_WORD_ORDER == NPY_BIG_ENDIAN
+
+typedef union
+{
+    double value;
+    struct
+    {
+        npy_uint32 msw;
+        npy_uint32 lsw;
+    } parts;
+} ieee_double_shape_type;
+
+#endif
+
+#if IEEE_WORD_ORDER == NPY_LITTLE_ENDIAN
+
+typedef union
+{
+    double value;
+    struct
+    {
+        npy_uint32 lsw;
+        npy_uint32 msw;
+    } parts;
+} ieee_double_shape_type;
+
+#endif
+
+/* Get two 32 bit ints from a double. */
+
+#define EXTRACT_WORDS(ix0,ix1,d) \
+do { \
+    ieee_double_shape_type ew_u; \
+    ew_u.value = (d); \
+    (ix0) = ew_u.parts.msw; \
+    (ix1) = ew_u.parts.lsw; \
+} while (0)
+
+/* Get the more significant 32 bit int from a double. */
+
+#define GET_HIGH_WORD(i,d) \
+do { \
+    ieee_double_shape_type gh_u; \
+    gh_u.value = (d); \
+    (i) = gh_u.parts.msw; \
+} while (0)
+
+/* Get the less significant 32 bit int from a double. */
+
+#define GET_LOW_WORD(i,d) \
+do { \
+    ieee_double_shape_type gl_u; \
+    gl_u.value = (d); \
+    (i) = gl_u.parts.lsw; \
+} while (0)
+
+/* Set the more significant 32 bits of a double from an int. */
+
+#define SET_HIGH_WORD(d,v) \
+do { \
+    ieee_double_shape_type sh_u; \
+    sh_u.value = (d); \
+    sh_u.parts.msw = (v); \
+    (d) = sh_u.value; \
+} while (0)
+
+/* Set the less significant 32 bits of a double from an int. */
+
+#define SET_LOW_WORD(d,v) \
+do { \
+    ieee_double_shape_type sl_u; \
+    sl_u.value = (d); \
+    sl_u.parts.lsw = (v); \
+    (d) = sl_u.value; \
+} while (0)
+
+#endif /* !_NPY_MATH_PRIVATE_H_ */
diff --git a/numpy/core/src/py3k_notes.txt b/numpy/core/src/py3k_notes.txt
new file mode 100644
index 000000000..e31755012
--- /dev/null
+++ b/numpy/core/src/py3k_notes.txt
@@ -0,0 +1,197 @@
+Notes on making the transition to python 3.x
+============================================
+
+PyTypeObject
+------------
+
+The PyTypeObject of py3k is binary compatible with the py2k version and the
+old initializers should work. However, there are several considerations to
+keep in mind.
+
+1) Because the first three slots are now part of a struct, some compilers
+issue warnings if they are initialized in the old way.
+
+2) The compare slot has been made reserved in order to preserve binary
+compatibility, even though the tp_compare function itself went away. The
+tp_richcompare function has replaced it, and we need to use that slot instead.
+This will likely require modifications in the searchsorted functions and
+generic sorts that currently use the compare function.
+
+3) The previous numpy practice of initializing the COUNT_ALLOCS slots was
+bogus. They are not supposed to be explicitly initialized and were out of
+place in any case because an extra base slot was added in python 2.6.
+
+Because of these facts, it was thought better to use #ifdefs to bring the old
+initializers up to py3k snuff rather than just fill the tp_richcompare slot.
+They also serve to mark the places where changes have been made. The new form
+is shown below. Note that explicit initialization can stop once none of the
+remaining entries are non-zero, because zero is the default value that
+variables with non-local linkage receive.
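Point 2) deserves a concrete illustration before the initializer example. Below is a minimal sketch of the kind of tp_richcompare function that replaces a removed tp_compare slot, using a hypothetical FooObject holding a single C int; everything here is illustrative and not taken from the numpy sources. The function returns a bool object per comparison op code instead of the old -1/0/1 convention, and it is installed in the tp_richcompare slot of the initializer shown next.

    /* Hypothetical object, used only for this sketch. */
    typedef struct {
        PyObject_HEAD
        int value;
    } FooObject;

    static PyObject *
    foo_richcompare(PyObject *a, PyObject *b, int op)
    {
        int va, vb, r;

        /* A real slot must verify both arguments; returning
         * Py_NotImplemented lets the interpreter try the
         * reflected operation instead. */
        if (Py_TYPE(a) != Py_TYPE(b)) {
            Py_INCREF(Py_NotImplemented);
            return Py_NotImplemented;
        }
        va = ((FooObject *)a)->value;
        vb = ((FooObject *)b)->value;
        switch (op) {
            case Py_LT: r = va <  vb; break;
            case Py_LE: r = va <= vb; break;
            case Py_EQ: r = va == vb; break;
            case Py_NE: r = va != vb; break;
            case Py_GT: r = va >  vb; break;
            default:    r = va >= vb; break;    /* Py_GE */
        }
        return PyBool_FromLong(r);
    }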
+
+
+NPY_NO_EXPORT PyTypeObject Foo_Type = {
+#if defined(NPY_PY3K)
+    PyVarObject_HEAD_INIT(0,0)
+#else
+    PyObject_HEAD_INIT(0)
+    0,                                          /* ob_size */
+#endif
+    "numpy.foo",                                /* tp_name */
+    0,                                          /* tp_basicsize */
+    0,                                          /* tp_itemsize */
+    /* methods */
+    0,                                          /* tp_dealloc */
+    0,                                          /* tp_print */
+    0,                                          /* tp_getattr */
+    0,                                          /* tp_setattr */
+#if defined(NPY_PY3K)
+    (void *)0,                                  /* tp_reserved */
+#else
+    0,                                          /* tp_compare */
+#endif
+    0,                                          /* tp_repr */
+    0,                                          /* tp_as_number */
+    0,                                          /* tp_as_sequence */
+    0,                                          /* tp_as_mapping */
+    0,                                          /* tp_hash */
+    0,                                          /* tp_call */
+    0,                                          /* tp_str */
+    0,                                          /* tp_getattro */
+    0,                                          /* tp_setattro */
+    0,                                          /* tp_as_buffer */
+    0,                                          /* tp_flags */
+    0,                                          /* tp_doc */
+    0,                                          /* tp_traverse */
+    0,                                          /* tp_clear */
+    0,                                          /* tp_richcompare */
+    0,                                          /* tp_weaklistoffset */
+    0,                                          /* tp_iter */
+    0,                                          /* tp_iternext */
+    0,                                          /* tp_methods */
+    0,                                          /* tp_members */
+    0,                                          /* tp_getset */
+    0,                                          /* tp_base */
+    0,                                          /* tp_dict */
+    0,                                          /* tp_descr_get */
+    0,                                          /* tp_descr_set */
+    0,                                          /* tp_dictoffset */
+    0,                                          /* tp_init */
+    0,                                          /* tp_alloc */
+    0,                                          /* tp_new */
+    0,                                          /* tp_free */
+    0,                                          /* tp_is_gc */
+    0,                                          /* tp_bases */
+    0,                                          /* tp_mro */
+    0,                                          /* tp_cache */
+    0,                                          /* tp_subclasses */
+    0,                                          /* tp_weaklist */
+    0,                                          /* tp_del */
+    0                                           /* tp_version_tag (2.6) */
+};
+
+Checklist of types having tp_compare but no tp_richcompare:
+
+1) multiarray/flagsobject.c
+
+PyNumberMethods
+---------------
+
+Types with tp_as_number defined:
+
+1) multiarray/arrayobject.c
+
+The PyNumberMethods struct has changed enough that it looks easiest to just
+have an alternate version. Note that nb_divide, nb_long, nb_oct, nb_hex, and
+nb_inplace_divide have gone away. The slot nb_int is what nb_long used to be,
+nb_divide is now nb_floor_divide, and nb_inplace_divide is now
+nb_inplace_floor_divide. We will also have to make sure the *_true_divide
+variants are defined. This should also be done for python < 3.x, but that
+introduces a requirement for Py_TPFLAGS_HAVE_CLASS in the type flags.
+
+/*
+ * Number implementations must check *both* arguments for proper type and
+ * implement the necessary conversions in the slot functions themselves.
+ */
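As a companion to that comment, here is a hedged sketch of the two division slots that remain, reusing the hypothetical FooObject from the richcompare sketch above and assuming Foo_Type is its type object: under py3k, a/b dispatches to nb_true_divide and a//b to nb_floor_divide.

    #include <math.h>    /* for floor() */

    static PyObject *
    foo_true_divide(PyObject *a, PyObject *b)
    {
        double va, vb;

        /* Check *both* arguments, as the comment above requires. */
        if (!PyObject_TypeCheck(a, &Foo_Type) ||
            !PyObject_TypeCheck(b, &Foo_Type)) {
            Py_INCREF(Py_NotImplemented);
            return Py_NotImplemented;
        }
        va = (double)((FooObject *)a)->value;
        vb = (double)((FooObject *)b)->value;
        if (vb == 0.0) {
            PyErr_SetString(PyExc_ZeroDivisionError, "division by zero");
            return NULL;
        }
        return PyFloat_FromDouble(va / vb);     /* goes in nb_true_divide */
    }

    static PyObject *
    foo_floor_divide(PyObject *a, PyObject *b)
    {
        PyObject *q = foo_true_divide(a, b);
        double d;

        if (q == NULL || q == Py_NotImplemented) {
            return q;
        }
        d = PyFloat_AS_DOUBLE(q);
        Py_DECREF(q);
        return PyFloat_FromDouble(floor(d));    /* goes in nb_floor_divide */
    }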
+PyNumberMethods foo_number_methods = {
+    (binaryfunc)0,                              /* nb_add */
+    (binaryfunc)0,                              /* nb_subtract */
+    (binaryfunc)0,                              /* nb_multiply */
+    (binaryfunc)0,                              /* nb_remainder */
+    (binaryfunc)0,                              /* nb_divmod */
+    (ternaryfunc)0,                             /* nb_power */
+    (unaryfunc)0,                               /* nb_negative */
+    (unaryfunc)0,                               /* nb_positive */
+    (unaryfunc)0,                               /* nb_absolute */
+    (inquiry)0,                                 /* nb_bool, nee nb_nonzero */
+    (unaryfunc)0,                               /* nb_invert */
+    (binaryfunc)0,                              /* nb_lshift */
+    (binaryfunc)0,                              /* nb_rshift */
+    (binaryfunc)0,                              /* nb_and */
+    (binaryfunc)0,                              /* nb_xor */
+    (binaryfunc)0,                              /* nb_or */
+    (unaryfunc)0,                               /* nb_int */
+    (void *)0,                                  /* nb_reserved, nee nb_long */
+    (unaryfunc)0,                               /* nb_float */
+    (binaryfunc)0,                              /* nb_inplace_add */
+    (binaryfunc)0,                              /* nb_inplace_subtract */
+    (binaryfunc)0,                              /* nb_inplace_multiply */
+    (binaryfunc)0,                              /* nb_inplace_remainder */
+    (ternaryfunc)0,                             /* nb_inplace_power */
+    (binaryfunc)0,                              /* nb_inplace_lshift */
+    (binaryfunc)0,                              /* nb_inplace_rshift */
+    (binaryfunc)0,                              /* nb_inplace_and */
+    (binaryfunc)0,                              /* nb_inplace_xor */
+    (binaryfunc)0,                              /* nb_inplace_or */
+    (binaryfunc)0,                              /* nb_floor_divide */
+    (binaryfunc)0,                              /* nb_true_divide */
+    (binaryfunc)0,                              /* nb_inplace_floor_divide */
+    (binaryfunc)0,                              /* nb_inplace_true_divide */
+    (unaryfunc)0                                /* nb_index */
+};
+
+PySequenceMethods
+-----------------
+
+Types with tp_as_sequence defined:
+
+1) multiarray/descriptor.c
+2) multiarray/scalartypes.c.src
+3) multiarray/arrayobject.c
+
+PySequenceMethods in py3k are binary compatible with py2k, but some of the
+slots have gone away. I suspect this means some functions need redefining,
+so the semantics of the slots need to be checked.
+
+PySequenceMethods foo_sequence_methods = {
+    (lenfunc)0,                                 /* sq_length */
+    (binaryfunc)0,                              /* sq_concat */
+    (ssizeargfunc)0,                            /* sq_repeat */
+    (ssizeargfunc)0,                            /* sq_item */
+    (void *)0,                                  /* nee sq_slice */
+    (ssizeobjargproc)0,                         /* sq_ass_item */
+    (void *)0,                                  /* nee sq_ass_slice */
+    (objobjproc)0,                              /* sq_contains */
+    (binaryfunc)0,                              /* sq_inplace_concat */
+    (ssizeargfunc)0                             /* sq_inplace_repeat */
+};
+
+PyMappingMethods
+----------------
+
+Types with tp_as_mapping defined:
+
+1) multiarray/descriptor.c
+2) multiarray/iterators.c
+3) multiarray/scalartypes.c.src
+4) multiarray/flagsobject.c
+5) multiarray/arrayobject.c
+
+PyMappingMethods in py3k look to be the same as in py2k, but the semantics
+of the slots still need to be checked; one place the py3k changes surface
+is slicing, sketched below.
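With sq_slice and sq_ass_slice gone, obj[a:b] reaches mp_subscript as a slice object under py3k, so a single handler must dispatch on the key type. A sketch only, with a hypothetical foo_subscript and a fixed length of 10; the PySlice and PyIndex calls are the standard C API of this era:

    static PyObject *
    foo_subscript(PyObject *self, PyObject *key)
    {
        if (PySlice_Check(key)) {
            Py_ssize_t start, stop, step, slicelength;
            if (PySlice_GetIndicesEx((PySliceObject *)key, 10,
                                     &start, &stop, &step, &slicelength) < 0) {
                return NULL;
            }
            /* Return the normalized indices; a real type would slice here. */
            return Py_BuildValue("(nnn)", start, stop, step);
        }
        if (PyIndex_Check(key)) {
            Py_ssize_t i = PyNumber_AsSsize_t(key, PyExc_IndexError);
            if (i == -1 && PyErr_Occurred()) {
                return NULL;
            }
            /* Return the index; a real type would fetch item i here. */
            return Py_BuildValue("n", i);
        }
        PyErr_SetString(PyExc_TypeError, "invalid index");
        return NULL;
    }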
+ +PyMappingMethods foo_mapping_methods = { + (lenfunc)0, /* mp_length */ + (binaryfunc)0, /* mp_subscript */ + (objobjargproc)0 /* mp_ass_subscript */ +}; + diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 25e15bca1..10cfa8716 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1044,6 +1044,16 @@ NPY_NO_EXPORT void } /**end repeat1**/ +NPY_NO_EXPORT void +@TYPE@_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1)= npy_copysign@c@(in1, in2); + } +} + /**begin repeat1 * #kind = maximum, minimum# * #OP = >=, <=# @@ -1265,9 +1275,18 @@ C@TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - @type@ d = in2r*in2r + in2i*in2i; - ((@type@ *)op1)[0] = (in1r*in2r + in1i*in2i)/d; - ((@type@ *)op1)[1] = (in1i*in2r - in1r*in2i)/d; + if (npy_fabs@c@(in2r) >= npy_fabs@c@(in2i)) { + const @type@ rat = in2i/in2r; + const @type@ scl = 1.0@c@/(in2r + in2i*rat); + ((@type@ *)op1)[0] = (in1r + in1i*rat)*scl; + ((@type@ *)op1)[1] = (in1i - in1r*rat)*scl; + } + else { + const @type@ rat = in2r/in2i; + const @type@ scl = 1.0@c@/(in2i + in2r*rat); + ((@type@ *)op1)[0] = (in1r*rat + in1i)*scl; + ((@type@ *)op1)[1] = (in1i*rat - in1r)*scl; + } } } @@ -1279,9 +1298,16 @@ C@TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSE const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - @type@ d = in2r*in2r + in2i*in2i; - ((@type@ *)op1)[0] = npy_floor@c@((in1r*in2r + in1i*in2i)/d); - ((@type@ *)op1)[1] = 0; + if (npy_fabs@c@(in2r) >= npy_fabs@c@(in2i)) { + const @type@ rat = in2i/in2r; + ((@type@ *)op1)[0] = npy_floor@c@((in1r + in1i*rat)/(in2r + in2i*rat)); + ((@type@ *)op1)[1] = 0; + } + else { + const @type@ rat = in2r/in2i; + ((@type@ *)op1)[0] = npy_floor@c@((in1r*rat + in1i)/(in2i + in2r*rat)); + ((@type@ *)op1)[1] = 0; + } } } diff --git a/numpy/core/src/umath/loops.h b/numpy/core/src/umath/loops.h index 9de4c5893..bf33ea88c 100644 --- a/numpy/core/src/umath/loops.h +++ b/numpy/core/src/umath/loops.h @@ -1527,6 +1527,9 @@ NPY_NO_EXPORT void FLOAT_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +FLOAT_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); + NPY_NO_EXPORT void FLOAT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); @@ -1668,6 +1671,9 @@ NPY_NO_EXPORT void DOUBLE_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +DOUBLE_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); + NPY_NO_EXPORT void DOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); @@ -1809,6 +1815,9 @@ NPY_NO_EXPORT void LONGDOUBLE_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +LONGDOUBLE_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); + NPY_NO_EXPORT void LONGDOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 97fcc8124..94152ccb7 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ 
b/numpy/core/src/umath/ufunc_object.c @@ -239,6 +239,122 @@ static char *_types_msg = "function not supported for these types, " \ "and can't coerce safely to supported types"; /* + * This function analyzes the input arguments + * and determines an appropriate __array_prepare__ function to call + * for the outputs. + * + * If an output argument is provided, then it is wrapped + * with its own __array_prepare__ not with the one determined by + * the input arguments. + * + * if the provided output argument is already an ndarray, + * the wrapping function is None (which means no wrapping will + * be done --- not even PyArray_Return). + * + * A NULL is placed in output_wrap for outputs that + * should just have PyArray_Return called. + */ +static void +_find_array_prepare(PyObject *args, PyObject **output_wrap, int nin, int nout) +{ + Py_ssize_t nargs; + int i; + int np = 0; + PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS]; + PyObject *obj, *wrap = NULL; + + nargs = PyTuple_GET_SIZE(args); + for (i = 0; i < nin; i++) { + obj = PyTuple_GET_ITEM(args, i); + if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) { + continue; + } + wrap = PyObject_GetAttrString(obj, "__array_prepare__"); + if (wrap) { + if (PyCallable_Check(wrap)) { + with_wrap[np] = obj; + wraps[np] = wrap; + ++np; + } + else { + Py_DECREF(wrap); + wrap = NULL; + } + } + else { + PyErr_Clear(); + } + } + if (np > 0) { + /* If we have some wraps defined, find the one of highest priority */ + wrap = wraps[0]; + if (np > 1) { + double maxpriority = PyArray_GetPriority(with_wrap[0], + PyArray_SUBTYPE_PRIORITY); + for (i = 1; i < np; ++i) { + double priority = PyArray_GetPriority(with_wrap[i], + PyArray_SUBTYPE_PRIORITY); + if (priority > maxpriority) { + maxpriority = priority; + Py_DECREF(wrap); + wrap = wraps[i]; + } + else { + Py_DECREF(wraps[i]); + } + } + } + } + + /* + * Here wrap is the wrapping function determined from the + * input arrays (could be NULL). + * + * For all the output arrays decide what to do. + * + * 1) Use the wrap function determined from the input arrays + * This is the default if the output array is not + * passed in. + * + * 2) Use the __array_prepare__ method of the output object. + * This is special cased for + * exact ndarray so that no PyArray_Return is + * done in that case. + */ + for (i = 0; i < nout; i++) { + int j = nin + i; + int incref = 1; + output_wrap[i] = wrap; + if (j < nargs) { + obj = PyTuple_GET_ITEM(args, j); + if (obj == Py_None) { + continue; + } + if (PyArray_CheckExact(obj)) { + output_wrap[i] = Py_None; + } + else { + PyObject *owrap = PyObject_GetAttrString(obj, + "__array_prepare__"); + incref = 0; + if (!(owrap) || !(PyCallable_Check(owrap))) { + Py_XDECREF(owrap); + owrap = wrap; + incref = 1; + PyErr_Clear(); + } + output_wrap[i] = owrap; + } + } + if (incref) { + Py_XINCREF(output_wrap[i]); + } + } + Py_XDECREF(wrap); + return; +} + +/* * Called for non-NULL user-defined functions. * The object should be a CObject pointing to a linked-list of functions * storing the function, data, and signature of all user-defined functions. @@ -1059,6 +1175,7 @@ construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps, npy_intp temp_dims[NPY_MAXDIMS]; npy_intp *out_dims; int out_nd; + PyObject *wraparr[NPY_MAXARGS]; /* Check number of arguments */ nargs = PyTuple_Size(args); @@ -1337,16 +1454,60 @@ construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps, return -1; } - /* Recover mps[i]. 
*/ - if (self->core_enabled) { - PyArrayObject *ao = mps[i]; - mps[i] = (PyArrayObject *)mps[i]->base; - Py_DECREF(ao); - } + /* Recover mps[i]. */ + if (self->core_enabled) { + PyArrayObject *ao = mps[i]; + mps[i] = (PyArrayObject *)mps[i]->base; + Py_DECREF(ao); + } } /* + * Use __array_prepare__ on all outputs + * if present on one of the input arguments. + * If present for multiple inputs: + * use __array_prepare__ of input object with largest + * __array_priority__ (default = 0.0) + * + * Exception: we should not wrap outputs for items already + * passed in as output-arguments. These items should either + * be left unwrapped or wrapped by calling their own __array_prepare__ + * routine. + * + * For each output argument, wrap will be either + * NULL --- call PyArray_Return() -- default if no output arguments given + * None --- array-object passed in don't call PyArray_Return + * method --- the __array_prepare__ method to call. + */ + _find_array_prepare(args, wraparr, loop->ufunc->nin, loop->ufunc->nout); + + /* wrap outputs */ + for (i = 0; i < loop->ufunc->nout; i++) { + int j = loop->ufunc->nin+i; + PyObject *wrap; + wrap = wraparr[i]; + if (wrap != NULL) { + if (wrap == Py_None) { + Py_DECREF(wrap); + continue; + } + PyObject *res = PyObject_CallFunction(wrap, "O(OOi)", + mps[j], loop->ufunc, args, i); + Py_DECREF(wrap); + if ((res == NULL) || (res == Py_None)) { + if (!PyErr_Occurred()){ + PyErr_SetString(PyExc_TypeError, + "__array_prepare__ must return an ndarray or subclass thereof"); + } + return -1; + } + Py_DECREF(mps[j]); + mps[j] = (PyArrayObject *)res; + } + } + + /* * If any of different type, or misaligned or swapped * then must use buffers */ @@ -3827,7 +3988,10 @@ ufunc_repr(PyUFuncObject *self) } -/* -------------------------------------------------------- */ +/****************************************************************************** + *** UFUNC METHODS *** + *****************************************************************************/ + /* * op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) @@ -3962,6 +4126,10 @@ static struct PyMethodDef ufunc_methods[] = { }; +/****************************************************************************** + *** UFUNC GETSET *** + *****************************************************************************/ + /* construct the string y1,y2,...,yn */ static PyObject * @@ -4000,7 +4168,8 @@ _typecharfromnum(int num) { static PyObject * ufunc_get_doc(PyUFuncObject *self) { - /* Put docstring first or FindMethod finds it... could so some + /* + * Put docstring first or FindMethod finds it... 
could so some * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention * construct name(x1, x2, ...,[ out1, out2, ...]) __doc__ @@ -4148,65 +4317,68 @@ static PyGetSetDef ufunc_getset[] = { {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; + +/****************************************************************************** + *** UFUNC TYPE OBJECT *** + *****************************************************************************/ + NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { - PyObject_HEAD_INIT(0) - 0, /* ob_size */ - "numpy.ufunc", /* tp_name */ - sizeof(PyUFuncObject), /* tp_basicsize */ - 0, /* tp_itemsize */ +#if defined(NPY_PY3K) + PyVarObject_HEAD_INIT(NULL, 0) +#else + PyObject_HEAD_INIT(NULL) + 0, /* ob_size */ +#endif + "numpy.ufunc", /* tp_name */ + sizeof(PyUFuncObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)ufunc_dealloc, /* tp_dealloc */ - (printfunc)0, /* tp_print */ - (getattrfunc)0, /* tp_getattr */ - (setattrfunc)0, /* tp_setattr */ - (cmpfunc)0, /* tp_compare */ - (reprfunc)ufunc_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)ufunc_generic_call, /* tp_call */ - (reprfunc)ufunc_repr, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - NULL, /* tp_doc */ /* was Ufunctype__doc__ */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - ufunc_methods, /* tp_methods */ - 0, /* tp_members */ - ufunc_getset, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + (destructor)ufunc_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if defined(NPY_PY3K) + 0, /* tp_reserved */ +#else + 0, /* tp_compare */ #endif + (reprfunc)ufunc_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + (ternaryfunc)ufunc_generic_call, /* tp_call */ + (reprfunc)ufunc_repr, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + ufunc_methods, /* tp_methods */ + 0, /* tp_members */ + ufunc_getset, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ }; /* End of code for ufunc objects */ -/* -------------------------------------------------------- */ diff --git a/numpy/core/tests/test_defmatrix.py b/numpy/core/tests/test_defmatrix.py index e9f0d9a7f..40728bd29 100644 --- 
a/numpy/core/tests/test_defmatrix.py
+++ b/numpy/core/tests/test_defmatrix.py
@@ -1,5 +1,6 @@
 from numpy.testing import *
 from numpy.core import *
+from numpy.core.defmatrix import matrix_power
 import numpy as np

 class TestCtor(TestCase):
@@ -358,6 +359,15 @@ class TestNewScalarIndexing(TestCase):
         assert_array_equal(x[:,[1,0]],x[:,::-1])
         assert_array_equal(x[[2,1,0],:],x[::-1,:])

+class TestPower(TestCase):
+    def test_returntype(self):
+        a = array([[0,1],[0,0]])
+        assert type(matrix_power(a, 2)) is ndarray
+        a = mat(a)
+        assert type(matrix_power(a, 2)) is matrix
+
+    def test_list(self):
+        assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])

 if __name__ == "__main__":
     run_module_suite()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 7022ef14d..57f1bd4c6 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4,6 +4,7 @@ import os
 import numpy as np
 from numpy.testing import *
 from numpy.core import *
+from numpy.core.multiarray_tests import test_neighborhood_iterator, test_neighborhood_iterator_oob

 from test_print import in_foreign_locale

@@ -279,6 +280,24 @@ class TestMethods(TestCase):
         self.failUnlessRaises(ValueError, lambda: a.transpose(0,1,2))

     def test_sort(self):
+        # Test ordering for floats and complex containing nans. It is only
+        # necessary to check the less-than comparison, so sorts that
+        # only follow the insertion sort path are sufficient. We only
+        # test doubles and complex doubles as the logic is the same.
+
+        # check doubles
+        msg = "Test real sort order with nans"
+        a = np.array([np.nan, 1, 0])
+        b = sort(a)
+        assert_equal(b, a[::-1], msg)
+        # check complex
+        msg = "Test complex sort order with nans"
+        a = np.zeros(9, dtype=np.complex128)
+        a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
+        a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
+        b = sort(a)
+        assert_equal(b, a[::-1], msg)
+
         # all c scalar sorts use the same code with different types
         # so it suffices to run a quick check with one type. The number
         # of sorted items must be greater than ~50 to check the actual
@@ -466,6 +485,33 @@ class TestMethods(TestCase):
         a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
         assert_equal(a.argsort(kind='m'), r)

+    def test_searchsorted(self):
+        # Test for floats and complex containing nans. The logic is the
+        # same for all float types, so only test double types for now.
+        # The searchsorted routines use the compare functions for the
+        # array type, so this checks that they are consistent with the
+        # sort order (see the standalone sketch below).
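        # A standalone sketch (not part of the patch) of the invariant the
        # checks below rely on, assuming only numpy: nan compares as largest
        # and so sorts to the end, hence over an already-sorted array
        # searchsorted finds each element at its own index on the left side
        # and one past it on the right side:
        #
        #     import numpy as np
        #     a = np.array([0.0, 1.0, np.nan])   # in sort order, nan last
        #     a.searchsorted(a, side='left')     # -> array([0, 1, 2])
        #     a.searchsorted(a, side='right')    # -> array([1, 2, 3])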
+ + # check double + a = np.array([np.nan, 1, 0]) + a = np.array([0, 1, np.nan]) + msg = "Test real searchsorted with nans, side='l'" + b = a.searchsorted(a, side='l') + assert_equal(b, np.arange(3), msg) + msg = "Test real searchsorted with nans, side='r'" + b = a.searchsorted(a, side='r') + assert_equal(b, np.arange(1,4), msg) + # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='l') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='r') + assert_equal(b, np.arange(1,10), msg) + def test_flatten(self): x0 = np.array([[1,2,3],[4,5,6]], np.int32) x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32) @@ -1058,6 +1104,282 @@ class TestChoose(TestCase): A = np.choose(self.ind, (self.x, self.y2)) assert_equal(A, [[2,2,3],[2,2,3]]) - +def can_use_decimal(): + try: + from decimal import Decimal + return True + except ImportError: + return False + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} +class TestNeighborhoodIter(TestCase): + # Simple, 2d tests + def _test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + def test_simple2d(self): + self._test_simple2d(np.float) + + @dec.skipif(not can_use_decimal(), + "Skip neighborhood iterator tests for decimal objects " \ + "(decimal module not available") + def test_simple2d_object(self): + from decimal import Decimal + self._test_simple2d(Decimal) + + def _test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + def test_mirror2d(self): + self._test_mirror2d(np.float) + + @dec.skipif(not can_use_decimal(), + "Skip neighborhood iterator tests for decimal objects " \ + "(decimal module not available") + def test_mirror2d_object(self): + from decimal import Decimal + self._test_mirror2d(Decimal) + + # Simple, 1d tests + def _test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero']) + 
assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + def test_simple_float(self): + self._test_simple(np.float) + + @dec.skipif(not can_use_decimal(), + "Skip neighborhood iterator tests for decimal objects " \ + "(decimal module not available") + def test_simple_object(self): + from decimal import Decimal + self._test_simple(Decimal) + + # Test mirror modes + def _test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) + self.failUnless([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + def test_mirror(self): + self._test_mirror(np.float) + + @dec.skipif(not can_use_decimal(), + "Skip neighborhood iterator tests for decimal objects " \ + "(decimal module not available") + def test_mirror_object(self): + from decimal import Decimal + self._test_mirror(Decimal) + + # Circular mode + def _test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + def test_circular(self): + self._test_circular(np.float) + + @dec.skipif(not can_use_decimal(), + "Skip neighborhood iterator tests for decimal objects " \ + "(decimal module not available") + def test_circular_object(self): + from decimal import Decimal + self._test_circular(Decimal) + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter(TestCase): + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'], + [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'], + [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = 
test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'], + [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], + [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], + [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [1, 1], 
NEIGH_MODE['zero'], + [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 832b2893f..206c06e66 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -896,19 +896,20 @@ class _TestCorrelate(TestCase): def test_float(self): self._setup(np.float) - z = np.correlate(self.x, self.y, 'full') - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full') + z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z2) def test_object(self): self._setup(Decimal) - z = np.correlate(self.x, self.y, 'full') - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full') + z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z2) class TestCorrelate(_TestCorrelate): + old_behavior = True def _setup(self, dt): # correlate uses an unconventional definition so that correlate(a, b) # == correlate(b, a), so force the corresponding outputs to be the same @@ -916,6 +917,7 @@ class TestCorrelate(_TestCorrelate): _TestCorrelate._setup(self, dt) self.z2 = self.z1 + @dec.deprecated() def test_complex(self): x = np.array([1, 2, 3, 4+1j], dtype=np.complex) y = np.array([-1, -2j, 3+1j], dtype=np.complex) @@ -923,7 +925,16 @@ class TestCorrelate(_TestCorrelate): z = np.correlate(x, y, 'full') assert_array_almost_equal(z, r_z) -class TestAcorrelate(_TestCorrelate): + @dec.deprecated() + def test_float(self): + _TestCorrelate.test_float(self) + + @dec.deprecated() + def test_object(self): + _TestCorrelate.test_object(self) + +class TestCorrelateNew(_TestCorrelate): + old_behavior = False def test_complex(self): x = np.array([1, 2, 3, 4+1j], dtype=np.complex) y = np.array([-1, -2j, 3+1j], dtype=np.complex) @@ -932,8 +943,24 @@ class TestAcorrelate(_TestCorrelate): #assert_array_almost_equal(z, r_z) r_z = r_z[::-1].conjugate() - z = np.acorrelate(y, x, 'full') + z = np.correlate(y, x, 'full', old_behavior=self.old_behavior) assert_array_almost_equal(z, r_z) +class TestArgwhere: + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + + def test_masked_array(self): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py index 4e0bb462b..56ed4dbb1 100644 --- a/numpy/core/tests/test_numerictypes.py +++ b/numpy/core/tests/test_numerictypes.py @@ -338,13 +338,13 @@ class TestEmptyField(TestCase): class TestCommonType(TestCase): def test_scalar_loses1(self): - res = np.find_common_type(['f4','f4','i4'],['f8']) + res = np.find_common_type(['f4','f4','i2'],['f8']) assert(res == 'f4') def test_scalar_loses2(self): res = np.find_common_type(['f4','f4'],['i8']) assert(res == 'f4') def test_scalar_wins(self): - res = np.find_common_type(['f4','f4','i4'],['c8']) + res = np.find_common_type(['f4','f4','i2'],['c8']) assert(res == 'c8') def 
test_scalar_wins2(self): res = np.find_common_type(['u4','i4','i4'],['f4']) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 94b10edb1..abea0a222 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -10,6 +10,29 @@ class TestDivision(TestCase): assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) + assert_almost_equal(x**2/x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2/x + assert_almost_equal(y/x, [1, 1], err_msg=msg) + + def test_floor_division_complex(self): + # check that implementation is correct + msg = "Complex floor division implementation check" + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) + y = np.array([0., -1., 0., 0.], dtype=np.complex128) + assert_equal(np.floor_divide(x**2,x), y, err_msg=msg) + # check overflow, underflow + msg = "Complex floor division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = np.floor_divide(x**2, x) + assert_equal(y, [1.e+110, 0], err_msg=msg) + class TestPower(TestCase): def test_power_float(self): x = np.array([1., 2., 3.]) @@ -42,7 +65,7 @@ class TestPower(TestCase): def assert_complex_equal(x, y): assert_array_equal(x.real, y.real) assert_array_equal(x.imag, y.imag) - + for z in [complex(0, np.inf), complex(1, np.inf)]: z = np.array([z], dtype=np.complex_) assert_complex_equal(z**1, z) @@ -87,7 +110,25 @@ class TestLogAddExp2(object): logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self) : + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + for dt in ['f','d','g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert np.isnan(np.logaddexp2(np.nan, np.inf)) + assert np.isnan(np.logaddexp2(np.inf, np.nan)) + assert np.isnan(np.logaddexp2(np.nan, 0)) + assert np.isnan(np.logaddexp2(0, np.nan)) + assert np.isnan(np.logaddexp2(np.nan, np.nan)) class TestLog(TestCase): def test_log_values(self) : @@ -130,6 +171,24 @@ class TestLogAddExp(object): logzf = np.array(z, dtype=dt) assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + def test_inf(self) : + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + for dt in ['f','d','g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert np.isnan(np.logaddexp(np.nan, np.inf)) + assert np.isnan(np.logaddexp(np.inf, np.nan)) + assert np.isnan(np.logaddexp(np.nan, 0)) + assert np.isnan(np.logaddexp(0, np.nan)) + assert np.isnan(np.logaddexp(np.nan, np.nan)) + class TestLog1p(TestCase): def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) @@ -140,6 +199,94 @@ class 
TestExpm1(TestCase): assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) +class TestHypot(TestCase, object): + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + +def assert_hypot_isnan(x, y): + assert np.isnan(ncu.hypot(x, y)) + +def assert_hypot_isinf(x, y): + assert np.isinf(ncu.hypot(x, y)) + +def test_hypot_special_values(): + yield assert_hypot_isnan, np.nan, np.nan + yield assert_hypot_isnan, np.nan, 1 + yield assert_hypot_isinf, np.nan, np.inf + yield assert_hypot_isinf, np.inf, np.nan + yield assert_hypot_isinf, np.inf, 0 + yield assert_hypot_isinf, 0, np.inf + +def test_arctan2_special_values(): + def assert_arctan2_isnan(x, y): + assert np.isnan(ncu.arctan2(x, y)) + + def assert_arctan2_ispinf(x, y): + assert np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0 + + def assert_arctan2_isninf(x, y): + assert np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0 + + def assert_arctan2_ispzero(x, y): + assert ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y)) + + def assert_arctan2_isnzero(x, y): + assert ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y)) + + # atan2(1, 1) returns pi/4. + yield assert_almost_equal, ncu.arctan2(1, 1), 0.25 * np.pi + yield assert_almost_equal, ncu.arctan2(-1, 1), -0.25 * np.pi + yield assert_almost_equal, ncu.arctan2(1, -1), 0.75 * np.pi + + # atan2(+-0, -0) returns +-pi. + yield assert_almost_equal, ncu.arctan2(np.PZERO, np.NZERO), np.pi + yield assert_almost_equal, ncu.arctan2(np.NZERO, np.NZERO), -np.pi + # atan2(+-0, +0) returns +-0. + yield assert_arctan2_ispzero, np.PZERO, np.PZERO + yield assert_arctan2_isnzero, np.NZERO, np.PZERO + + # atan2(+-0, x) returns +-pi for x < 0. + yield assert_almost_equal, ncu.arctan2(np.PZERO, -1), np.pi + yield assert_almost_equal, ncu.arctan2(np.NZERO, -1), -np.pi + + # atan2(+-0, x) returns +-0 for x > 0. + yield assert_arctan2_ispzero, np.PZERO, 1 + yield assert_arctan2_isnzero, np.NZERO, 1 + + # atan2(y, +-0) returns +pi/2 for y > 0. + yield assert_almost_equal, ncu.arctan2(1, np.PZERO), 0.5 * np.pi + yield assert_almost_equal, ncu.arctan2(1, np.NZERO), 0.5 * np.pi + + # atan2(y, +-0) returns -pi/2 for y < 0. + yield assert_almost_equal, ncu.arctan2(-1, np.PZERO), -0.5 * np.pi + yield assert_almost_equal, ncu.arctan2(-1, np.NZERO), -0.5 * np.pi + + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + yield assert_almost_equal, ncu.arctan2(1, np.NINF), np.pi + yield assert_almost_equal, ncu.arctan2(-1, np.NINF), -np.pi + + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + yield assert_arctan2_ispzero, 1, np.inf + yield assert_arctan2_isnzero, -1, np.inf + + # atan2(+-infinity, x) returns +-pi/2 for finite x. + yield assert_almost_equal, ncu.arctan2( np.inf, 1), 0.5 * np.pi + yield assert_almost_equal, ncu.arctan2(-np.inf, 1), -0.5 * np.pi + + # atan2(+-infinity, -infinity) returns +-3*pi/4. + yield assert_almost_equal, ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi + yield assert_almost_equal, ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi + + # atan2(+-infinity, +infinity) returns +-pi/4. 
+ yield assert_almost_equal, ncu.arctan2( np.inf, np.inf), 0.25 * np.pi + yield assert_almost_equal, ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi + + # atan2(nan, x) returns nan for any x, including inf + yield assert_arctan2_isnan, np.nan, np.inf + yield assert_arctan2_isnan, np.inf, np.nan + yield assert_arctan2_isnan, np.nan, np.nan + class TestMaximum(TestCase): def test_reduce_complex(self): assert_equal(np.maximum.reduce([1,2j]),1) @@ -337,6 +484,38 @@ class TestSpecialMethods(TestCase): a = A() self.failUnlessRaises(RuntimeError, ncu.maximum, a, a) + def test_default_prepare(self): + class with_wrap(object): + __array_priority__ = 10 + def __array__(self): + return np.zeros(1) + def __array_wrap__(self, arr, context): + return arr + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x, np.zeros(1)) + assert_equal(type(x), np.ndarray) + + def test_prepare(self): + class with_prepare(np.ndarray): + __array_priority__ = 10 + def __array_prepare__(self, arr, context): + # make sure we can return a new + return np.array(arr).view(type=with_prepare) + a = np.array(1).view(type=with_prepare) + x = np.add(a, a) + assert_equal(x, np.array(2)) + assert_equal(type(x), with_prepare) + + def test_failing_prepare(self): + class A(object): + def __array__(self): + return np.zeros(1) + def __array_prepare__(self, arr, context=None): + raise RuntimeError + a = A() + self.failUnlessRaises(RuntimeError, ncu.maximum, a, a) + def test_array_with_context(self): class A(object): def __array__(self, dtype=None, context=None): @@ -637,6 +816,13 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym) assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym) +def test_copysign(): + assert np.copysign(1, -1) == -1 + assert 1 / np.copysign(0, -1) < 0 + assert 1 / np.copysign(0, 1) > 0 + assert np.signbit(np.copysign(np.nan, -1)) + assert not np.signbit(np.copysign(np.nan, 1)) + def test_pos_nan(): """Check np.nan is a positive nan.""" assert np.signbit(np.nan) == 0 diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py index 10dec7373..4ed08d7f6 100644 --- a/numpy/distutils/__init__.py +++ b/numpy/distutils/__init__.py @@ -7,6 +7,7 @@ import ccompiler import unixccompiler from info import __doc__ +from npy_pkg_config import * try: import __config__ diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py index dfe81d542..87546aeee 100644 --- a/numpy/distutils/command/__init__.py +++ b/numpy/distutils/command/__init__.py @@ -7,7 +7,7 @@ __revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" distutils_all = [ 'build_py', 'clean', - 'install_lib', + 'install_clib', 'install_scripts', 'bdist', 'bdist_dumb', @@ -26,6 +26,7 @@ __all__ = ['build', 'install', 'install_data', 'install_headers', + 'install_lib', 'bdist_rpm', 'sdist', ] + distutils_all diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 9f6be52eb..88fa809c7 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -3,6 +3,7 @@ import os from glob import glob +import shutil from distutils.command.build_clib import build_clib as old_build_clib from distutils.errors import DistutilsSetupError, DistutilsError, \ DistutilsFileError @@ -27,11 +28,15 @@ class build_clib(old_build_clib): user_options = old_build_clib.user_options + [ ('fcompiler=', None, "specify the Fortran compiler type"), + ('inplace', 'i', 
'Build in-place'), ] + boolean_options = old_build_clib.boolean_options + ['inplace'] + def initialize_options(self): old_build_clib.initialize_options(self) self.fcompiler = None + self.inplace = 0 return def have_f_sources(self): @@ -94,6 +99,14 @@ class build_clib(old_build_clib): self.build_libraries(self.libraries) + if self.inplace: + for l in self.distribution.installed_libraries: + libname = self.compiler.library_filename(l.name) + source = os.path.join(self.build_clib, libname) + target = os.path.join(l.target_dir, libname) + self.mkpath(l.target_dir) + shutil.copy(source, target) + def get_source_files(self): self.check_library_list(self.libraries) filenames = [] diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 2b114d4a7..4d11d033d 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -57,8 +57,20 @@ class build_ext (old_build_ext): self.run_command('build_src') if self.distribution.has_c_libraries(): - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') + if self.inplace: + if self.distribution.have_run.get('build_clib'): + log.warn('build_clib already run, it is too late to ' \ + 'ensure in-place build of build_clib') + else: + build_clib = self.distribution.get_command_obj('build_clib') + build_clib.inplace = 1 + build_clib.ensure_finalized() + build_clib.run() + self.distribution.have_run['build_clib'] = 1 + + else: + self.run_command('build_clib') + build_clib = self.get_finalized_command('build_clib') self.library_dirs.append(build_clib.build_clib) else: build_clib = None diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 4ba3f0a9a..818bd52fd 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -5,6 +5,7 @@ import os import re import sys import shlex +import copy from distutils.command import build_ext from distutils.dep_util import newer_group, newer @@ -22,10 +23,29 @@ except ImportError: #import numpy.f2py from numpy.distutils import log from numpy.distutils.misc_util import fortran_ext_match, \ - appendpath, is_string, is_sequence + appendpath, is_string, is_sequence, get_cmd from numpy.distutils.from_template import process_file as process_f_file from numpy.distutils.conv_template import process_file as process_c_file +def subst_vars(target, source, d): + """Substitute any occurence of @foo@ by d['foo'] from source file into + target.""" + var = re.compile('@([a-zA-Z_]+)@') + fs = open(source, 'r') + try: + ft = open(target, 'w') + try: + for l in fs.readlines(): + m = var.search(l) + if m: + ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) + else: + ft.write(l) + finally: + ft.close() + finally: + fs.close() + class build_src(build_ext.build_ext): description = "build sources from SWIG, F2PY files or a function" @@ -125,6 +145,7 @@ class build_src(build_ext.build_ext): setattr(self, c, v) def run(self): + log.info("build_src") if not (self.extensions or self.libraries): return self.build_sources() @@ -147,6 +168,7 @@ class build_src(build_ext.build_ext): self.build_extension_sources(ext) self.build_data_files_sources() + self.build_npy_pkg_config() def build_data_files_sources(self): if not self.data_files: @@ -183,6 +205,61 @@ class build_src(build_ext.build_ext): raise TypeError(repr(data)) self.data_files[:] = new_data_files + + def _build_npy_pkg_config(self, info, gd): + import shutil + template, install_dir, subst_dict = info + template_dir = 
os.path.dirname(template) + for k, v in gd.items(): + subst_dict[k] = v + + if self.inplace == 1: + generated_dir = os.path.join(template_dir, install_dir) + else: + generated_dir = os.path.join(self.build_src, template_dir, + install_dir) + generated = os.path.basename(os.path.splitext(template)[0]) + generated_path = os.path.join(generated_dir, generated) + if not os.path.exists(generated_dir): + os.makedirs(generated_dir) + + subst_vars(generated_path, template, subst_dict) + + # Where to install relatively to install prefix + full_install_dir = os.path.join(template_dir, install_dir) + return full_install_dir, generated_path + + def build_npy_pkg_config(self): + log.info('build_src: building npy-pkg config files') + + # XXX: another ugly workaround to circumvent distutils brain damage. We + # need the install prefix here, but finalizing the options of the + # install command when only building sources cause error. Instead, we + # copy the install command instance, and finalize the copy so that it + # does not disrupt how distutils want to do things when with the + # original install command instance. + install_cmd = copy.copy(get_cmd('install')) + if not install_cmd.finalized == 1: + install_cmd.finalize_options() + build_npkg = False + gd = {} + if hasattr(install_cmd, 'install_libbase'): + top_prefix = install_cmd.install_libbase + build_npkg = True + elif self.inplace == 1: + top_prefix = '.' + build_npkg = True + + if build_npkg: + for pkg, infos in self.distribution.installed_pkg_config.items(): + pkg_path = self.distribution.package_dir[pkg] + prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) + d = {'prefix': prefix} + for info in infos: + install_dir, generated = self._build_npy_pkg_config(info, d) + self.distribution.data_files.append((install_dir, + [generated])) + def build_py_modules_sources(self): if not self.py_modules: return diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py index 00821d260..dbf8e77a1 100644 --- a/numpy/distutils/command/config.py +++ b/numpy/distutils/command/config.py @@ -166,6 +166,35 @@ int main() return self.try_compile(body, headers, include_dirs) + def check_type(self, type_name, headers=None, include_dirs=None, + library_dirs=None): + """Check type availability. 
Return True if the type can be compiled, + False otherwise""" + self._check_compiler() + + # First check the type can be compiled + body = r""" +int main() { + if ((%(name)s *) 0) + return 0; + if (sizeof (%(name)s)) + return 0; +} +""" % {'name': type_name} + + st = False + try: + try: + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + st = True + except distutils.errors.CompileError, e: + st = False + finally: + self._clean() + + return st + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): """Check size of a given type.""" self._check_compiler() diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py index 87b549da8..099ad5c16 100644 --- a/numpy/distutils/command/install.py +++ b/numpy/distutils/command/install.py @@ -10,6 +10,9 @@ from distutils.file_util import write_file class install(old_install): + # Always run install_clib - the command is cheap, so no need to bypass it + sub_commands = old_install.sub_commands + [('install_clib', lambda x: True)] + def finalize_options (self): old_install.finalize_options(self) self.install_lib = self.install_libbase diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py new file mode 100644 index 000000000..638d4beac --- /dev/null +++ b/numpy/distutils/command/install_clib.py @@ -0,0 +1,37 @@ +import os +from distutils.core import Command +from distutils.ccompiler import new_compiler +from numpy.distutils.misc_util import get_cmd + +class install_clib(Command): + description = "Command to install installable C libraries" + + user_options = [] + + def initialize_options(self): + self.install_dir = None + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', ('install_lib', 'install_dir')) + + def run (self): + build_clib_cmd = get_cmd("build_clib") + build_dir = build_clib_cmd.build_clib + + # We need the compiler to get the library name -> filename association + if not build_clib_cmd.compiler: + compiler = new_compiler(compiler=None) + compiler.customize(self.distribution) + else: + compiler = build_clib_cmd.compiler + + for l in self.distribution.installed_libraries: + target_dir = os.path.join(self.install_dir, l.target_dir) + name = compiler.library_filename(l.name) + source = os.path.join(build_dir, name) + self.mkpath(target_dir) + self.outfiles.append(self.copy_file(source, target_dir)[0]) + + def get_outputs(self): + return self.outfiles diff --git a/numpy/distutils/command/scons.py b/numpy/distutils/command/scons.py index f733dc4b2..28fb5cb03 100644 --- a/numpy/distutils/command/scons.py +++ b/numpy/distutils/command/scons.py @@ -10,7 +10,7 @@ from numpy.distutils.ccompiler import CCompiler from numpy.distutils.fcompiler import FCompiler from numpy.distutils.exec_command import find_executable from numpy.distutils import log -from numpy.distutils.misc_util import is_bootstrapping +from numpy.distutils.misc_util import is_bootstrapping, get_cmd def get_scons_build_dir(): """Return the top path where everything produced by scons will be put. @@ -45,14 +45,34 @@ def get_scons_local_path(): from numscons import get_scons_path return get_scons_path() -def get_distutils_libdir(cmd, pkg): - """Returns the path where distutils install libraries, relatively to the - scons build directory.""" +def _get_top_dir(pkg): + # XXX: this mess is necessary because scons is launched per package, and + # has no knowledge outside its build dir, which is package dependent. 
If + # one day numscons does not launch one process/package, this will be + # unnecessary. from numscons import get_scons_build_dir from numscons.core.utils import pkg_to_path scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg)) n = scdir.count(os.sep) - return pjoin(os.sep.join([os.pardir for i in range(n+1)]), cmd.build_lib) + return os.sep.join([os.pardir for i in range(n+1)]) + +def get_distutils_libdir(cmd, pkg): + """Returns the path where distutils install libraries, relatively to the + scons build directory.""" + return pjoin(_get_top_dir(pkg), cmd.build_lib) + +def get_distutils_clibdir(cmd, pkg): + """Returns the path where distutils put pure C libraries.""" + return pjoin(_get_top_dir(pkg), cmd.build_temp) + +def get_distutils_install_prefix(pkg, inplace): + """Returns the installation path for the current package.""" + from numscons.core.utils import pkg_to_path + if inplace == 1: + return pkg_to_path(pkg) + else: + install_cmd = get_cmd('install').get_finalized_command('install') + return pjoin(install_cmd.install_libbase, pkg_to_path(pkg)) def get_python_exec_invoc(): """This returns the python executable from which this file is invocated.""" @@ -352,28 +372,26 @@ class scons(old_build_ext): "this package " % str(e)) try: - minver = "0.9.3" - try: - # version_info was added in 0.10.0 - from numscons import version_info - except ImportError: - from numscons import get_version - if get_version() < minver: - raise ValueError() + minver = [0, 10, 2] + # version_info was added in 0.10.0 + from numscons import version_info + # Stupid me used string instead of numbers in version_info in + # dev versions of 0.10.0 + if isinstance(version_info[0], str): + raise ValueError("Numscons %s or above expected " \ + "(detected 0.10.0)" % str(minver)) + if version_info[:3] < minver: + raise ValueError("Numscons %s or above expected (got %s) " + % (str(minver), str(version_info))) except ImportError: raise RuntimeError("You need numscons >= %s to build numpy "\ "with numscons (imported numscons path " \ "is %s)." % (minver, numscons.__file__)) - except ValueError: - raise RuntimeError("You need numscons >= %s to build numpy "\ - "with numscons (detected %s )" \ - % (minver, get_version())) else: # nothing to do, just leave it here. return - print "is bootstrapping ? %s" % is_bootstrapping() # XXX: when a scons script is missing, scons only prints warnings, and # does not return a failure (status is 0). We have to detect this from # distutils (this cannot work for recursive scons builds...) 
@@ -422,6 +440,10 @@ class scons(old_build_ext): # pdirname(sconscript)))) cmd.append('distutils_libdir=%s' % protect_path(get_distutils_libdir(self, pkg_name))) + cmd.append('distutils_clibdir=%s' % + protect_path(get_distutils_clibdir(self, pkg_name))) + prefix = get_distutils_install_prefix(pkg_name, self.inplace) + cmd.append('distutils_install_prefix=%s' % protect_path(prefix)) if not self._bypass_distutils_cc: cmd.append('cc_opt=%s' % self.scons_compiler) diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py index f8a01c797..097df6fac 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/distutils/conv_template.py @@ -168,13 +168,13 @@ def parse_loop_header(loophead) : nsub = size elif nsub != size : msg = "Mismatch in number of values:\n%s = %s" % (name, vals) - raise ValueError, msg + raise ValueError(msg) names.append((name,vals)) # generate list of dictionaries, one for each template iteration dlist = [] if nsub is None : - raise ValueError, "No substitution variables found" + raise ValueError("No substitution variables found") for i in range(nsub) : tmp = {} for name,vals in names : @@ -192,8 +192,8 @@ def parse_string(astr, env, level, line) : try : val = env[name] except KeyError, e : - msg = 'line %d: %s'%(line, e) - raise ValueError, msg + msg = 'line %d: no definition of key "%s"'%(line, name) + raise ValueError(msg) return val code = [lineno] @@ -213,7 +213,7 @@ def parse_string(astr, env, level, line) : envlist = parse_loop_header(head) except ValueError, e : msg = "line %d: %s" % (newline, e) - raise ValueError, msg + raise ValueError(msg) for newenv in envlist : newenv.update(env) newcode = parse_string(text, newenv, newlevel, newline) @@ -261,7 +261,7 @@ def process_file(source): try: code = process_str(''.join(lines)) except ValueError, e: - raise ValueError, '"%s", %s' % (sourcefile, e) + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) return '#line 1 "%s"\n%s' % (sourcefile, code) @@ -299,5 +299,5 @@ if __name__ == "__main__": try: writestr = process_str(allstr) except ValueError, e: - raise ValueError, "file %s, %s" % (file, e) + raise ValueError("In %s loop at %s" % (file, e)) outfile.write(writestr) diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py index 20fc3eac1..8481640bd 100644 --- a/numpy/distutils/core.py +++ b/numpy/distutils/core.py @@ -24,7 +24,8 @@ from numpy.distutils.extension import Extension from numpy.distutils.numpy_distribution import NumpyDistribution from numpy.distutils.command import config, config_compiler, \ build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, scons + sdist, install_data, install_headers, install, bdist_rpm, scons, \ + install_clib from numpy.distutils.misc_util import get_data_files, is_sequence, is_string numpy_cmdclass = {'build': build.build, @@ -40,6 +41,7 @@ numpy_cmdclass = {'build': build.build, 'scons': scons.scons, 'install_data': install_data.install_data, 'install_headers': install_headers.install_headers, + 'install_clib': install_clib.install_clib, 'install': install.install, 'bdist_rpm': bdist_rpm.bdist_rpm, } diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 8e25d1cb0..3c1da28da 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -2,6 +2,8 @@ import re import os import sys import warnings +import platform +from subprocess import Popen, PIPE, STDOUT from numpy.distutils.cpuinfo import cpu from 
numpy.distutils.fcompiler import FCompiler @@ -19,6 +21,16 @@ _R_ARCHS = {"ppc": r"^Target: (powerpc-.*)$", "x86_64": r"^Target: (i686-.*)$", "ppc64": r"^Target: (powerpc-.*)$",} +# XXX: handle cross compilation +def is_win64(): + return sys.platform == "win32" and platform.architecture()[0] == "64bit" + +if is_win64(): + #_EXTRAFLAGS = ["-fno-leading-underscore"] + _EXTRAFLAGS = [] +else: + _EXTRAFLAGS = [] + class GnuFCompiler(FCompiler): compiler_type = 'gnu' compiler_aliases = ('g77',) @@ -220,10 +232,10 @@ class Gnu95FCompiler(GnuFCompiler): executables = { 'version_cmd' : ["<F90>", "--version"], 'compiler_f77' : [None, "-Wall", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"], + "-fno-second-underscore"] + _EXTRAFLAGS, + 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"] + _EXTRAFLAGS, 'compiler_fix' : [None, "-Wall", "-ffixed-form", - "-fno-second-underscore"], + "-fno-second-underscore"] + _EXTRAFLAGS, 'linker_so' : ["<F90>", "-Wall"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], @@ -291,6 +303,13 @@ class Gnu95FCompiler(GnuFCompiler): i = opt.index("gcc") opt.insert(i+1, "mingwex") opt.insert(i+1, "mingw32") + # XXX: fix this mess, does not work for mingw + if is_win64(): + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + raise NotImplementedError("Only MS compiler supported with gfortran on win64") return opt def get_target(self): @@ -303,12 +322,19 @@ class Gnu95FCompiler(GnuFCompiler): return m.group(1) return "" + def get_flags_opt(self): + if is_win64(): + return ['-O0'] + else: + return GnuFCompiler.get_flags_opt(self) def _can_target(cmd, arch): """Return true is the command supports the -arch flag for the given architecture.""" newcmd = cmd[:] newcmd.extend(["-arch", arch, "-v"]) - st, out = exec_command(" ".join(newcmd)) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE) + st = p.communicate() + out = p.stdout if st == 0: for line in out.splitlines(): m = re.search(_R_ARCHS[arch], line) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index b3928015c..f72827332 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -122,7 +122,7 @@ class IntelItaniumFCompiler(IntelFCompiler): compiler_aliases = () description = 'Intel Fortran Compiler for Itanium apps' - version_match = intel_version_match('Itanium') + version_match = intel_version_match('Itanium|IA-64') #Intel(R) Fortran Itanium(R) Compiler for Itanium(R)-based applications #Version 9.1 Build 20060928 Package ID: l_fc_c_9.1.039 diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index ecd60375e..b44e7db30 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -23,7 +23,13 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'get_script_files', 'get_lib_source_files', 'get_data_files', 'dot_join', 'get_frame', 'minrelpath','njoin', 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture'] + 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info'] + +class InstallableLib: + def __init__(self, name, build_info, target_dir): + self.name = name + self.build_info = build_info + self.target_dir = target_dir def quote_args(args): # don't used _nt_quote_args as it does not check if @@ -589,8 +595,9 @@ def get_frame(level=0): class Configuration(object): _list_keys = ['packages', 'ext_modules', 
'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', 'scons_data'] - _dict_keys = ['package_dir'] + 'libraries', 'headers', 'scripts', 'py_modules', 'scons_data', + 'installed_libraries'] + _dict_keys = ['package_dir', 'installed_pkg_config'] _extra_keys = ['name', 'version'] numpy_include_dirs = [] @@ -693,9 +700,15 @@ class Configuration(object): self.setup_name = setup_name def todict(self): - """Return configuration distionary suitable for passing - to distutils.core.setup() function. """ + Return a dictionary compatible with the keyword arguments of distutils + setup function. + + Example + ------- + >>> setup(\**config.todict()). + """ + self._optimize_data_files() d = {} known_keys = self.list_keys + self.dict_keys + self.extra_keys @@ -728,6 +741,7 @@ class Configuration(object): raise ValueError,'Unknown option: '+key def get_distribution(self): + """Return the distutils distribution object for self.""" from numpy.distutils.core import get_distribution return get_distribution() @@ -793,7 +807,17 @@ class Configuration(object): caller_level = 1): """Return list of subpackage configurations. - '*' in subpackage_name is handled as a wildcard. + Parameters + ---------- + subpackage_name: str,None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path: str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. + parent_name: str + Parent name. """ if subpackage_name is None: if subpackage_path is None: @@ -841,8 +865,22 @@ class Configuration(object): def add_subpackage(self,subpackage_name, subpackage_path=None, standalone = False): - """Add subpackage to configuration. + """Add a sub-package to the current Configuration instance. + + This is useful in a setup.py script for adding sub-packages to a + package. + + Parameters + ---------- + subpackage_name: str + name of the subpackage + subpackage_path: str + if given, the subpackage path such as the subpackage is in + subpackage_path / subpackage_name. If None,the subpackage is + assumed to be located in the local path / subpackage_name. + standalone: bool """ + if standalone: parent_name = None else: @@ -869,11 +907,24 @@ class Configuration(object): def add_data_dir(self,data_path): """Recursively add files under data_path to data_files list. - Argument can be either - - 2-sequence (<datadir suffix>,<path to data directory>) - - path to data directory where python datadir suffix defaults - to package dir. + Recursively add files under data_path to the list of data_files to be + installed (and distributed). The data_path can be either a relative + path-name, or an absolute path-name, or a 2-tuple where the first + argument shows where in the install directory the data directory + should be installed to. + + Parameters + ---------- + data_path: seq,str + Argument can be either + + * 2-sequence (<datadir suffix>,<path to data directory>) + * path to data directory where python datadir suffix defaults + to package dir. 
+ + Notes + ----- Rules for installation paths: foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar (gun, foo/bar) -> parent/gun @@ -883,6 +934,30 @@ class Configuration(object): /foo/bar -> (bar, /foo/bar) -> parent/bar (gun, /foo/bar) -> parent/gun (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar + + Examples + -------- + For example suppose the source directory contains fun/foo.dat and + fun/bar/car.dat:: + + >>> self.add_data_dir('fun') + >>> self.add_data_dir(('sun', 'fun')) + >>> self.add_data_dir(('gun', '/full/path/to/fun')) + + Will install data-files to the locations:: + + <package install directory>/ + fun/ + foo.dat + bar/ + car.dat + sun/ + foo.dat + bar/ + car.dat + gun/ + foo.dat + car.dat """ if is_sequence(data_path): d, data_path = data_path @@ -960,24 +1035,90 @@ class Configuration(object): def add_data_files(self,*files): """Add data files to configuration data_files. - Argument(s) can be either - - 2-sequence (<datadir prefix>,<path to data file(s)>) - - paths to data files where python datadir prefix defaults - to package dir. + + Parameters + ---------- + files: sequence + Argument(s) can be either + + * 2-sequence (<datadir prefix>,<path to data file(s)>) + * paths to data files where python datadir prefix defaults + to package dir. + + Notes + ----- + The form of each element of the files sequence is very flexible + allowing many combinations of where to get the files from the package + and where they should ultimately be installed on the system. The most + basic usage is for an element of the files argument sequence to be a + simple filename. This will cause that file from the local path to be + installed to the installation path of the self.name package (package + path). The file argument can also be a relative path in which case the + entire relative path will be installed into the package directory. + Finally, the file can be an absolute path name in which case the file + will be found at the absolute path name but installed to the package + path. + + This basic behavior can be augmented by passing a 2-tuple in as the + file argument. The first element of the tuple should specify the + relative path (under the package install directory) where the + remaining sequence of files should be installed to (it has nothing to + do with the file-names in the source distribution). The second element + of the tuple is the sequence of files that should be installed. The + files in this sequence can be filenames, relative paths, or absolute + paths. For absolute paths the file will be installed in the top-level + package installation directory (regardless of the first argument). + Filenames and relative path names will be installed in the package + install directory under the path name given as the first element of + the tuple. Rules for installation paths: - file.txt -> (., file.txt)-> parent/file.txt - foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - *.txt -> parent/a.txt, parent/b.txt - foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt - */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt - (sun, file.txt) -> parent/sun/file.txt - (sun, bar/file.txt) -> parent/sun/file.txt - (sun, /foo/bar/file.txt) -> parent/sun/file.txt - (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt - (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt - (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt + + #. file.txt -> (., file.txt)-> parent/file.txt + #. 
foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt + #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt + #. *.txt -> parent/a.txt, parent/b.txt + #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt + #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt + #. (sun, file.txt) -> parent/sun/file.txt + #. (sun, bar/file.txt) -> parent/sun/file.txt + #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt + #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt + #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt + #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt + + An additional feature is that the path to a data-file can actually be + a function that takes no arguments and returns the actual path(s) to + the data-files. This is useful when the data files are generated while + building the package. + + Examples + -------- + Add files to the list of data_files to be included with the package. + + >>> self.add_data_files('foo.dat', + ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), + 'bar/cat.dat', + '/full/path/to/can.dat') + + will install these data files to:: + + <package install directory>/ + foo.dat + fun/ + gun.dat + nun/ + pun.dat + sun.dat + bar/ + car.dat + can.dat + + where <package install directory> is the package (or sub-package) + directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: + \\Python2.4 \\Lib \\site-packages \\mypackage') or + '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: + \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). """ if len(files)>1: @@ -1047,6 +1188,10 @@ class Configuration(object): def add_include_dirs(self,*paths): """Add paths to configuration include directories. + + Add the given sequence of paths to the beginning of the include_dirs + list. This list will be visible to all extension modules of the + current package. """ include_dirs = self.paths(paths) dist = self.get_distribution() @@ -1061,10 +1206,21 @@ class Configuration(object): def add_headers(self,*files): """Add installable headers to configuration. - Argument(s) can be either - - 2-sequence (<includedir suffix>,<path to header file(s)>) - - path(s) to header file(s) where python includedir suffix will default - to package name. + + Add the given sequence of files to the beginning of the headers list. + By default, headers will be installed under <python- + include>/<self.name.replace('.','/')>/ directory. If an item of files + is a tuple, then its first argument specifies the actual installation + location relative to the <python-include> path. + + Parameters + ---------- + files: str, seq + Argument(s) can be either: + + * 2-sequence (<includedir suffix>,<path to header file(s)>) + * path(s) to header file(s) where python includedir suffix will + default to package name. """ headers = [] for path in files: @@ -1082,6 +1238,13 @@ class Configuration(object): def paths(self,*paths,**kws): """Apply glob to paths and prepend local_path if needed. + + Applies glob.glob(...) to each path in the sequence (if needed) and + pre-pends the local_path if needed. Because this is called on all + source lists, this allows wildcard characters to be specified in lists + of sources for extension modules and libraries and scripts and allows + path-names be relative to the source directory. + """ include_non_existing = kws.get('include_non_existing',True) return gpaths(paths, @@ -1099,14 +1262,48 @@ class Configuration(object): def add_extension(self,name,sources,**kw): """Add extension to configuration. 
- Keywords: - include_dirs, define_macros, undef_macros, - library_dirs, libraries, runtime_library_dirs, - extra_objects, extra_compile_args, extra_link_args, - export_symbols, swig_opts, depends, language, - f2py_options, module_dirs - extra_info - dict or list of dict of keywords to be - appended to keywords. + Create and add an Extension instance to the ext_modules list. This + method also takes the following optional keyword arguments that are + passed on to the Extension constructor. + + Parameters + ---------- + name: str + name of the extension + sources: seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + include_dirs: + define_macros: + undef_macros: + library_dirs: + libraries: + runtime_library_dirs: + extra_objects: + extra_compile_args: + extra_link_args: + export_symbols: + swig_opts: + depends: + The depends list contains paths to files or directories that the + sources of the extension module depend on. If any path in the + depends list is newer than the extension module, then the module + will be rebuilt. + language: + f2py_options: + module_dirs: + extra_info: dict,list + dict or list of dict of keywords to be appended to keywords. + + Notes + ----- + The self.paths(...) method is applied to all lists that may contain + paths. """ ext_args = copy.copy(kw) ext_args['name'] = dot_join(self.name,name) @@ -1164,14 +1361,38 @@ class Configuration(object): def add_library(self,name,sources,**build_info): """Add library to configuration. - Valid keywords for build_info: - depends - macros - include_dirs - extra_compiler_args - f2py_options - language + Parameters + ---------- + name: str + name of the extension + sources: seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info: dict + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * f2py_options + * language """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" build_info = copy.copy(build_info) name = name #+ '__OF__' + self.name build_info['sources'] = sources @@ -1183,12 +1404,116 @@ class Configuration(object): self._fix_paths_dict(build_info) - self.libraries.append((name,build_info)) + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """Similar to add_library, but the corresponding library is installed. 
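To make the contrast with plain ``add_library`` concrete, a short sketch (the library and file names are invented)::

    # built only for linking into this package's own extensions;
    # nothing is installed
    config.add_library('helpers', sources=['helpers.c'])

    # built the same way, but also installed (relative to the current
    # sub-package) so that third-party packages can link against it
    config.add_installed_library('helpers', sources=['helpers.c'],
                                 install_dir='lib')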
+ + Most C libraries are only used to build python extensions, but + libraries built through this method will be installed so that they can + be reused by third-party. install_dir is relative to the current + subpackage. + + Parameters + ---------- + name: str + name of the installed library + sources: seq + list of source files of the library + install_dir: str + path where to install the library (relatively to the current + sub-package) + + See also + -------- + add_library, add_npy_pkg_config, get_info + + Notes + ----- + The best way to encode the necessary options to link against those C + libraries is to use a libname.ini file, and use get_info to retrieve + those informations (see add_npy_pkg_config method for more + information). + """ + if not build_info: + build_info = {} + + install_dir = os.path.join(self.package_path, install_dir) + self._add_library(name, sources, install_dir, build_info) + self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) + + def add_npy_pkg_config(self, template, install_dir, subst_dict=None): + """Generate a npy-pkg config file from the template, and install it in + given install directory, using subst_dict for variable substitution. + + Parameters + ---------- + template: str + the path of the template, relatively to the current package path + install_dir: str + where to install the npy-pkg config file, relatively to the current + package path + subst_dict: dict (None by default) + if given, any string of the form @key@ will be replaced by + subst_dict[key] in the template file when installed. The install + prefix is always available through the variable @prefix@, since the + install prefix is not easy to get reliably from setup.py. + + See also + -------- + add_installed_library, get_info + + Notes + ----- + This works for both standard installs and in-place builds, i.e. the + @prefix@ refer to the source directory for in-place builds. + + Examples + -------- + config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) + + Assuming the foo.ini.in file has the following content:: + + [meta] + Name=@foo@ + Version=1.0 + Description=dummy description + + [default] + Cflags=-I@prefix@/include + Libs= + + The generated file will have the following content:: + + [meta] + Name=bar + Version=1.0 + Description=dummy description + + [default] + Cflags=-Iprefix_dir/include + Libs= + + and will be installed as foo.ini in the 'lib' subpath. + """ + if subst_dict is None: + subst_dict = {} + basename = os.path.splitext(template)[0] + template = os.path.join(self.package_path, template) + + if self.installed_pkg_config.has_key(self.name): + self.installed_pkg_config[self.name].append((template, install_dir, + subst_dict)) + else: + self.installed_pkg_config[self.name] = [(template, install_dir, + subst_dict)] - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) + def add_scons_installed_library(self, name, install_dir): + """Add an scons-built installable library to distutils. + """ + install_dir = os.path.join(self.package_path, install_dir) + self.installed_libraries.append(InstallableLib(name, {}, install_dir)) def add_sconscript(self, sconscript, subpackage_path=None, standalone = False, pre_hook = None, @@ -1240,6 +1565,10 @@ class Configuration(object): def add_scripts(self,*files): """Add scripts to configuration. + + Add the sequence of files to the beginning of the scripts list. 
+ Scripts will be installed under the <prefix>/bin/ directory. + """ scripts = self.paths(files) dist = self.get_distribution() @@ -1287,6 +1616,9 @@ class Configuration(object): return s def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ cmd = get_cmd('config') cmd.ensure_finalized() cmd.dump_source = 0 @@ -1298,14 +1630,24 @@ class Configuration(object): return cmd def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ cmd = get_cmd('build') cmd.ensure_finalized() return cmd.build_temp def have_f77c(self): """Check for availability of Fortran 77 compiler. + Use it inside source generating function to ensure that setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). """ simple_fortran_subroutine = ''' subroutine simple @@ -1317,8 +1659,14 @@ class Configuration(object): def have_f90c(self): """Check for availability of Fortran 90 compiler. + Use it inside source generating function to ensure that setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 90 compiler is available (because a simple Fortran + 90 code was able to be compiled successfully) """ simple_fortran_subroutine = ''' subroutine simple @@ -1378,6 +1726,16 @@ class Configuration(object): def get_version(self, version_file=None, version_variable=None): """Try to get version string of a package. + + Return a version string of the current package or None if the version + information could not be detected. + + Notes + ----- + This method scans files named + __version__.py, <packagename>_version.py, version.py, and + __svn_version__.py for string variables version, __version\__, and + <packagename>_version, until a version number is found. """ version = getattr(self,'version',None) if version is not None: @@ -1431,11 +1789,20 @@ class Configuration(object): return version def make_svn_version_py(self, delete=True): - """Generate package __svn_version__.py file from SVN revision number, + """Appends a data function to the data_files list that will generate + __svn_version__.py file to the current package directory. + + Generate package __svn_version__.py file from SVN revision number, it will be removed after python exits but will be available when sdist, etc commands are executed. + Notes + ----- If __svn_version__.py existed before, nothing is done. + + This is + intended for working with source directories that are in an SVN + repository. """ target = njoin(self.local_path,'__svn_version__.py') revision = self._get_svn_revision(self.local_path) @@ -1467,6 +1834,10 @@ class Configuration(object): def make_config_py(self,name='__config__'): """Generate package __config__.py file containing system_info information used during building the package. + + This file is installed to the + package installation directory. + """ self.py_modules.append((self.name,name,generate_config_py)) @@ -1478,6 +1849,9 @@ class Configuration(object): def get_info(self,*names): """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. 
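For example, the returned dictionary can be fed straight into ``add_extension`` through its ``extra_info`` argument (a sketch; the extension name and source file are invented, while ``'lapack_opt'`` is a standard system_info section)::

    lapack = config.get_info('lapack_opt')
    config.add_extension('linalg_helper',
                         sources=['linalg_helper.c'],
                         extra_info=lapack)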
""" from system_info import get_info, dict_append info_dict = {} @@ -1507,6 +1881,90 @@ def get_numpy_include_dirs(): # else running numpy/core/setup.py return include_dirs +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory.""" + # XXX: import here for bootstrapping reasons + import numpy + d = os.path.join(os.path.dirname(numpy.__file__), + 'core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """Given a clib package name, returns a info dict with the necessary + options to use the clib. + + Parameters + ---------- + pkgname: str + name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini) + dirs: seq {None} + if given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are search prior to the + numpy one. + + Note + ---- + Raise a numpy.distutils.PkgNotFound exception if the package is not + found. + + See Also + -------- + add_npy_pkg_info, add_installed_library, get_info + """ + from numpy.distutils.npy_pkg_config import read_config + + if dirs: + dirs.append(get_npy_pkg_dir()) + else: + dirs = [get_npy_pkg_dir()] + return read_config(pkgname, dirs) + +def get_info(pkgname, dirs=None): + """Given a clib package name, returns a info dict with the necessary + options to use the clib. + + Parameters + ---------- + pkgname: str + name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini) + dirs: seq {None} + if given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are search prior to the + numpy one. + + Note + ---- + Raise a numpy.distutils.PkgNotFound exception if the package is not + found. 
+ + See Also + -------- + add_npy_pkg_info, add_installed_library, get_pkg_info + + Example + ------- + To get the necessary informations for the npymath library from NumPy: + + >>> npymath_info = get_info('npymath') + >>> config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) + """ + from numpy.distutils.npy_pkg_config import parse_flags + pkg_info = get_pkg_info(pkgname, dirs) + + # Translate LibraryInfo instance into a build_info dict + info = parse_flags(pkg_info.cflags()) + for k, v in parse_flags(pkg_info.libs()).items(): + info[k].extend(v) + + # add_extension extra_info argument is ANAL + info['define_macros'] = info['macros'] + del info['macros'] + del info['ignored'] + + return info + def is_bootstrapping(): import __builtin__ try: diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py new file mode 100644 index 000000000..2e42ee63a --- /dev/null +++ b/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,306 @@ +from ConfigParser import SafeConfigParser, NoOptionError +import re +import os +import shlex + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(IOError): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(IOError): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + lexer = shlex.shlex(line) + lexer.whitespace_split = True + + d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], + 'macros': [], 'ignored': []} + def next_token(t): + if t.startswith('-I'): + if len(t) > 2: + d['include_dirs'].append(t[2:]) + else: + t = lexer.get_token() + d['include_dirs'].append(t) + elif t.startswith('-L'): + if len(t) > 2: + d['library_dirs'].append(t[2:]) + else: + t = lexer.get_token() + d['library_dirs'].append(t) + elif t.startswith('-l'): + d['libraries'].append(t[2:]) + elif t.startswith('-D'): + d['macros'].append(t[2:]) + else: + d['ignored'].append(t) + return lexer.get_token() + + t = lexer.get_token() + while t: + t = next_token(t) + + return d + +class LibraryInfo(object): + def __init__(self, name, description, version, sections, vars, requires=None): + self.name = name + self.description = description + if requires: + self.requires = requires + else: + self.requires = [] + self.version = version + self._sections = sections + self.vars = vars + + def sections(self): + return self._sections.keys() + + def cflags(self, section="default"): + return self.vars.interpolate(self._sections[section]['cflags']) + + def libs(self, section="default"): + return self.vars.interpolate(self._sections[section]['libs']) + + def __str__(self): + m = ['Name: %s' % self.name] + m.append('Description: %s' % self.description) + if self.requires: + m.append('Requires:') + else: + m.append('Requires: %s' % ",".join(self.requires)) + m.append('Version: %s' % self.version) + + return "\n".join(m) + +class VariableSet(object): + def __init__(self, d): + self._raw_data = dict([(k, v) for k, v in d.items()]) + + self._re = {} + self._re_sub = {} + + self._init_parse() + + def _init_parse(self): + for k, v in self._raw_data.items(): + self._init_parse_var(k, v) + + def _init_parse_var(self, name, value): + self._re[name] = re.compile(r'\$\{%s\}' % name) + self._re_sub[name] = value + + def interpolate(self, value): + # Brute force: we keep interpolating until there is no '${var}' anymore + # or until interpolated string is 
equal to input string + def _interpolate(value): + for k in self._re.keys(): + value = self._re[k].sub(self._re_sub[k], value) + return value + while _VAR.search(value): + nvalue = _interpolate(value) + if nvalue == value: + break + value = nvalue + + return value + + def variables(self): + return self._raw_data.keys() + + # Emulate a dict to set/get variables values + def __getitem__(self, name): + return self._raw_data[name] + + def __setitem__(self, name, value): + self._raw_data[name] = value + self._init_parse_var(name, value) + +def parse_meta(config): + if not config.has_section('meta'): + raise FormatError("No meta section found !") + + d = {} + for name, value in config.items('meta'): + d[name] = value + + for k in ['name', 'description', 'version']: + if not d.has_key(k): + raise FormatError("Option %s (section [meta]) is mandatory, " + "but not found" % k) + + if not d.has_key('requires'): + d['requires'] = [] + + return d + +def parse_variables(config): + if not config.has_section('variables'): + raise FormatError("No variables section found !") + + d = {} + + for name, value in config.items("variables"): + d[name] = value + + return VariableSet(d) + +def parse_sections(config): + return meta_d, r + +def pkg_to_filename(pkg_name): + return "%s.ini" % pkg_name + +def parse_config(filename, dirs=None): + if dirs: + filenames = [os.path.join(d, filename) for d in dirs] + else: + filenames = [filename] + + config = SafeConfigParser() + n = config.read(filenames) + if not len(n) >= 1: + raise PkgNotFound("Could not find file(s) %s" % str(filenames)) + + # Parse meta and variables sections + meta = parse_meta(config) + + vars = {} + if config.has_section('variables'): + for name, value in config.items("variables"): + vars[name] = value.replace("\\", "\\\\") + + # Parse "normal" sections + secs = [s for s in config.sections() if not s in ['meta', 'variables']] + sections = {} + + requires = {} + for s in secs: + d = {} + if config.has_option(s, "requires"): + requires[s] = config.get(s, 'requires') + + for name, value in config.items(s): + d[name] = value + sections[s] = d + + return meta, vars, sections, requires + +def _read_config_imp(filenames, dirs=None): + def _read_config(f): + meta, vars, sections, reqs = parse_config(f, dirs) + # recursively add sections and variables of required libraries + for rname, rvalue in reqs.items(): + nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) + + # Update var dict for variables not in 'top' config file + for k, v in nvars.items(): + if not vars.has_key(k): + vars[k] = v + + # Update sec dict + for oname, ovalue in nsections[rname].items(): + sections[rname][oname] += ' %s' % ovalue + + return meta, vars, sections, reqs + + meta, vars, sections, reqs = _read_config(filenames) + + return LibraryInfo(name=meta["name"], description=meta["description"], + version=meta["version"], sections=sections, vars=VariableSet(vars)) + +# Trivial cache to cache LibraryInfo instances creation. 
To be really +# efficient, the cache should be handled in read_config, since a same file can +# be parsed many time outside LibraryInfo creation, but I doubt this will be a +# problem in practice +_CACHE = {} +def read_config(pkgname, dirs=None): + try: + return _CACHE[pkgname] + except KeyError: + v = _read_config_imp(pkg_to_filename(pkgname), dirs) + _CACHE[pkgname] = v + return v + +# TODO: +# - implements version comparison (modversion + atleast) + +# pkg-config simple emulator - useful for debugging, and maybe later to query +# the system +if __name__ == '__main__': + import sys + from optparse import OptionParser + import glob + + parser = OptionParser() + parser.add_option("--cflags", dest="cflags", action="store_true", + help="output all preprocessor and compiler flags") + parser.add_option("--libs", dest="libs", action="store_true", + help="output all linker flags") + parser.add_option("--use-section", dest="section", + help="use this section instead of default for options") + parser.add_option("--version", dest="version", action="store_true", + help="output version") + parser.add_option("--atleast-version", dest="min_version", + help="Minimal version") + parser.add_option("--list-all", dest="list_all", action="store_true", + help="Minimal version") + parser.add_option("--define-variable", dest="define_variable", + help="Replace variable with the given value") + + (options, args) = parser.parse_args(sys.argv) + + if len(args) < 2: + raise ValueError("Expect package name on the command line:") + + if options.list_all: + files = glob.glob("*.ini") + for f in files: + info = read_config(f) + print "%s\t%s - %s" % (info.name, info.name, info.description) + + pkg_name = args[1] + import os + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d: + info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d]) + else: + info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.']) + + if options.section: + section = options.section + else: + section = "default" + + if options.define_variable: + m = re.search('([\S]+)=([\S]+)', options.define_variable) + if not m: + raise ValueError("--define-variable option should be of " \ + "the form --define-variable=foo=bar") + else: + name = m.group(1) + value = m.group(2) + info.vars[name] = value + + if options.cflags: + print info.cflags(section) + if options.libs: + print info.libs(section) + if options.version: + print info.version + if options.min_version: + print info.version >= options.min_version diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py index 681b8b316..4424e34cd 100644 --- a/numpy/distutils/numpy_distribution.py +++ b/numpy/distutils/numpy_distribution.py @@ -7,6 +7,10 @@ class NumpyDistribution(Distribution): def __init__(self, attrs = None): # A list of (sconscripts, pre_hook, post_hook, src, parent_names) self.scons_data = [] + # A list of installable libraries + self.installed_libraries = [] + # A dict of pkg_config files to generate/install + self.installed_pkg_config = {} Distribution.__init__(self, attrs) def has_scons_scripts(self): diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 000000000..5553aa878 --- /dev/null +++ b/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,96 @@ +import os +from tempfile import mkstemp + +from numpy.testing import * +from numpy.distutils.npy_pkg_config import read_config, parse_flags + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 
0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo(TestCase): + def test_simple(self): + fd, filename = mkstemp('foo.ini') + try: + pkg = os.path.splitext(filename)[0] + try: + os.write(fd, simple) + finally: + os.close(fd) + + out = read_config(pkg) + self.failUnless(out.cflags() == simple_d['cflags']) + self.failUnless(out.libs() == simple_d['libflags']) + self.failUnless(out.name == simple_d['name']) + self.failUnless(out.version == simple_d['version']) + finally: + os.remove(filename) + + def test_simple_variable(self): + fd, filename = mkstemp('foo.ini') + try: + pkg = os.path.splitext(filename)[0] + try: + os.write(fd, simple_variable) + finally: + os.close(fd) + + out = read_config(pkg) + self.failUnless(out.cflags() == simple_variable_d['cflags']) + self.failUnless(out.libs() == simple_variable_d['libflags']) + self.failUnless(out.name == simple_variable_d['name']) + self.failUnless(out.version == simple_variable_d['version']) + + out.vars['prefix'] = '/Users/david' + self.failUnless(out.cflags() == '-I/Users/david/include') + finally: + os.remove(filename) + +class TestParseFlags(TestCase): + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + self.failUnless(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + self.failUnless(d['include_dirs'] == ['/usr/include']) + self.failUnless(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + self.failUnless(d['include_dirs'] == ['/usr/include']) + self.failUnless(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + self.failUnless(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + self.failUnless(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + self.failUnless(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + self.failUnless(d['libraries'] == ['foo', 'bar']) diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py index 22e353b0e..154c74621 100644 --- a/numpy/doc/constants.py +++ b/numpy/doc/constants.py @@ -64,9 +64,9 @@ add_newdoc('numpy', 'NAN', See Also -------- - isnan : Shows which elements are Not a Number. - isfinite : Shows which elements are finite (not one of - Not a Number, positive infinity and negative infinity) + isnan: Shows which elements are Not a Number. + + isfinite: Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity) Notes ----- @@ -182,8 +182,8 @@ add_newdoc('numpy', 'NaN', -------- isnan : Shows which elements are Not a Number. - isfinite : Shows which elements are finite (not one of - Not a Number, positive infinity and negative infinity) + + isfinite : Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity) Notes ----- diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py index 133765678..d57c7c261 100644 --- a/numpy/doc/creation.py +++ b/numpy/doc/creation.py @@ -76,7 +76,7 @@ generally will not do for arbitrary start, stop, and step values. 
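In such cases ``linspace``, which takes the number of elements rather than a step, is usually the safer choice::

    >>> np.linspace(1., 4., 6)
    array([ 1. ,  1.6,  2.2,  2.8,  3.4,  4. ])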
indices() will create a set of arrays (stacked as a one-higher dimensioned array), one per dimension with each representing variation in that dimension. -An examples illustrates much better than a verbal description: :: +An example illustrates much better than a verbal description: :: >>> np.indices((3,3)) array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py index bbd44f8ee..de0338060 100644 --- a/numpy/doc/subclassing.py +++ b/numpy/doc/subclassing.py @@ -11,59 +11,133 @@ Gerard-Marchant - http://www.scipy.org/Subclasses. Introduction ------------ -Subclassing ndarray is relatively simple, but you will need to -understand some behavior of ndarrays to understand some minor -complications to subclassing. There are examples at the bottom of the -page, but you will probably want to read the background to understand -why subclassing works as it does. + +Subclassing ndarray is relatively simple, but it has some complications +compared to other Python objects. On this page we explain the machinery +that allows you to subclass ndarray, and the implications for +implementing a subclass. ndarrays and object creation ============================ -The creation of ndarrays is complicated by the need to return views of -ndarrays, that are also ndarrays. For example: + +Subclassing ndarray is complicated by the fact that new instances of +ndarray classes can come about in three different ways. These are: + +#. Explicit constructor call - as in ``MySubClass(params)``. This is + the usual route to Python instance creation. +#. View casting - casting an existing ndarray as a given subclass +#. New from template - creating a new instance from a template + instance. Examples include returning slices from a subclassed array, + creating return types from ufuncs, and copying arrays. See + :ref:`new-from-template` for more details + +The last two are characteristics of ndarrays - in order to support +things like array slicing. The complications of subclassing ndarray are +due to the mechanisms numpy has to support these latter two routes of +instance creation. + +.. _view-casting: + +View casting +------------ + +*View casting* is the standard ndarray mechanism by which you take an +ndarray of any subclass, and return a view of the array as another +(specified) subclass: >>> import numpy as np +>>> # create a completely useless ndarray subclass +>>> class C(np.ndarray): pass +>>> # create a standard ndarray >>> arr = np.zeros((3,)) ->>> type(arr) -<type 'numpy.ndarray'> ->>> v = arr[1:] ->>> type(v) -<type 'numpy.ndarray'> ->>> v is arr +>>> # take a view of it, as our useless subclass +>>> c_arr = arr.view(C) +>>> type(c_arr) +<class 'C'> + +.. _new-from-template: + +Creating new from template +-------------------------- + +New instances of an ndarray subclass can also come about by a very +similar mechanism to :ref:`view-casting`, when numpy finds it needs to +create a new instance from a template instance. The most obvious place +this has to happen is when you are taking slices of subclassed arrays. +For example: + +>>> v = c_arr[1:] +>>> type(v) # the view is of type 'C' +<class 'C'> +>>> v is c_arr # but it's a new instance False -So, when we take a view (here a slice) from the ndarray, we return a -new ndarray, that points to the data in the original. When we -subclass ndarray, taking a view (such as a slice) needs to return an -object of our own class. 
There is machinery to do this, but it is -this machinery that makes subclassing slightly non-standard. +The slice is a *view* onto the original ``c_arr`` data. So, when we +take a view from the ndarray, we return a new ndarray, of the same +class, that points to the data in the original. -To allow subclassing, and views of subclasses, ndarray uses the -ndarray ``__new__`` method for the main work of object initialization, -rather then the more usual ``__init__`` method. +There are other points in the use of ndarrays where we need such views, +such as copying arrays (``c_arr.copy()``), creating ufunc output arrays +(see also :ref:`array-wrap`), and reducing methods (like +``c_arr.mean()``. -``__new__`` and ``__init__`` -============================ +Relationship of view casting and new-from-template +-------------------------------------------------- + +These paths both use the same machinery. We make the distinction here, +because they result in different input to your methods. Specifically, +:ref:`view-casting` means you have created a new instance of your array +type from any potential subclass of ndarray. :ref:`new-from-template` +means you have created a new instance of your class from a pre-existing +instance, allowing you - for example - to copy across attributes that +are particular to your subclass. + +Implications for subclassing +---------------------------- -``__new__`` is a standard python method, and, if present, is called -before ``__init__`` when we create a class instance. Consider the -following:: +If we subclass ndarray, we need to deal not only with explicit +construction of our array type, but also :ref:`view-casting` or +:ref:`new-from-template`. Numpy has the machinery to do this, and this +machinery that makes subclassing slightly non-standard. + +There are two aspects to the machinery that ndarray uses to support +views and new-from-template in subclasses. + +The first is the use of the ``ndarray.__new__`` method for the main work +of object initialization, rather then the more usual ``__init__`` +method. The second is the use of the ``__array_finalize__`` method to +allow subclasses to clean up after the creation of views and new +instances from templates. + +A brief Python primer on ``__new__`` and ``__init__`` +===================================================== + +``__new__`` is a standard Python method, and, if present, is called +before ``__init__`` when we create a class instance. See the `python +__new__ documentation +<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail. + +For example, consider the following Python code: + +.. testcode:: class C(object): def __new__(cls, *args): + print 'Cls in __new__:', cls print 'Args in __new__:', args return object.__new__(cls, *args) + def __init__(self, *args): + print 'type(self) in __init__:', type(self) print 'Args in __init__:', args - C('hello') - -The code gives the following output:: +meaning that we get: - cls is: <class '__main__.C'> - Args in __new__: ('hello',) - self is : <__main__.C object at 0xb7dc720c> - Args in __init__: ('hello',) +>>> c = C('hello') +Cls in __new__: <class 'C'> +Args in __new__: ('hello',) +type(self) in __init__: <class 'C'> +Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class as first argument, and the passed argument, which is the string @@ -79,34 +153,29 @@ done in the ``__new__`` method. Why use ``__new__`` rather than just the usual ``__init__``? 
Because in some cases, as for ndarray, we want to be able to return an object -of some other class. Consider the following:: +of some other class. Consider the following: - class C(object): - def __new__(cls, *args): - print 'cls is:', cls - print 'Args in __new__:', args - return object.__new__(cls, *args) - def __init__(self, *args): - print 'self is :', self - print 'Args in __init__:', args +.. testcode:: class D(C): def __new__(cls, *args): print 'D cls is:', cls print 'D args in __new__:', args return C.__new__(C, *args) - def __init__(self, *args): - print 'D self is :', self - print 'D args in __init__:', args - D('hello') + def __init__(self, *args): + # we never get here + print 'In D __init__' -which gives:: +meaning that: - D cls is: <class '__main__.D'> - D args in __new__: ('hello',) - cls is: <class '__main__.C'> - Args in __new__: ('hello',) +>>> obj = D('hello') +D cls is: <class 'D'> +D args in __new__: ('hello',) +Cls in __new__: <class 'C'> +Args in __new__: ('hello',) +>>> type(obj) +<class 'C'> The definition of ``C`` is the same as before, but for ``D``, the ``__new__`` method returns an instance of class ``C`` rather than @@ -133,21 +202,107 @@ this way, in its standard methods for taking views, but the ndarray why not call ``obj = subdtype.__new__(...`` then? Because we may not have a ``__new__`` method with the same call signature). -So, when creating a new view object of our subclass, we need to be -able to set any extra attributes from the original object of our -class. This is the role of the ``__array_finalize__`` method of -ndarray. ``__array_finalize__`` is called from within the -ndarray machinery, each time we create an ndarray of our own class, -and passes in the new view object, created as above, as well as the -old object from which the view has been taken. In it we can take any -attributes from the old object and put then into the new view object, -or do any other related processing. Now we are ready for a simple -example. +The role of ``__array_finalize__`` +================================== + +``__array_finalize__`` is the mechanism that numpy provides to allow +subclasses to handle the various ways that new instances get created. + +Remember that subclass instances can come about in these three ways: + +#. explicit constructor call (``obj = MySubClass(params)``). This will + call the usual sequence of ``MySubClass.__new__`` then (if it exists) + ``MySubClass.__init__``. +#. :ref:`view-casting` +#. :ref:`new-from-template` + +Our ``MySubClass.__new__`` method only gets called in the case of the +explicit constructor call, so we can't rely on ``MySubClass.__new__`` or +``MySubClass.__init__`` to deal with the view casting and +new-from-template. It turns out that ``MySubClass.__array_finalize__`` +*does* get called for all three methods of object creation, so this is +where our object creation housekeeping usually goes. + +* For the explicit constructor call, our subclass will need to create a + new ndarray instance of its own class. In practice this means that + we, the authors of the code, will need to make a call to + ``ndarray.__new__(MySubClass,...)``, or do view casting of an existing + array (see below) +* For view casting and new-from-template, the equivalent of + ``ndarray.__new__(MySubClass,...`` is called, at the C level. + +The arguments that ``__array_finalize__`` recieves differ for the three +methods of instance creation above. + +The following code allows us to look at the call sequences and arguments: + +.. 
testcode:: + + import numpy as np + + class C(np.ndarray): + def __new__(cls, *args, **kwargs): + print 'In __new__ with class %s' % cls + return np.ndarray.__new__(cls, *args, **kwargs) + + def __init__(self, *args, **kwargs): + # in practice you probably will not need or want an __init__ + # method for your subclass + print 'In __init__ with class %s' % self.__class__ + + def __array_finalize__(self, obj): + print 'In array_finalize:' + print ' self type is %s' % type(self) + print ' obj type is %s' % type(obj) + + +Now: + +>>> # Explicit constructor +>>> c = C((10,)) +In __new__ with class <class 'C'> +In array_finalize: + self type is <class 'C'> + obj type is <type 'NoneType'> +In __init__ with class <class 'C'> +>>> # View casting +>>> a = np.arange(10) +>>> cast_a = a.view(C) +In array_finalize: + self type is <class 'C'> + obj type is <type 'numpy.ndarray'> +>>> # Slicing (example of new-from-template) +>>> cv = c[:1] +In array_finalize: + self type is <class 'C'> + obj type is <class 'C'> + +The signature of ``__array_finalize__`` is:: + + def __array_finalize__(self, obj): + +``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our +own class (``self``) as well as the object from which the view has been +taken (``obj``). As you can see from the output above, the ``self`` is +always a newly created instance of our subclass, and the type of ``obj`` +differs for the three instance creation methods: + +* When called from the explicit constructor, ``obj`` is ``None`` +* When called from view casting, ``obj`` can be an instance of any + subclass of ndarray, including our own. +* When called in new-from-template, ``obj`` is another instance of our + own subclass, that we might use to update the new ``self`` instance. + +Because ``__array_finalize__`` is the only method that always sees new +instances being created, it is the sensible place to fill in instance +defaults for new object attributes, among other tasks. + +This may be clearer with an example. Simple example - adding an extra attribute to ndarray ----------------------------------------------------- -:: +.. testcode:: import numpy as np @@ -156,45 +311,79 @@ Simple example - adding an extra attribute to ndarray def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual - # input arguments. This will call the standard ndarray - # constructor, but return an object of our type + # ndarray input arguments. This will call the standard + # ndarray constructor, but return an object of our type. + # It also triggers a call to InfoArray.__array_finalize__ obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides, order) - # add the new attribute to the created instance + # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: return obj - def __array_finalize__(self,obj): - # reset the attribute from passed original object + def __array_finalize__(self, obj): + # ``self`` is a new object resulting from + # ndarray.__new__(InfoArray, ...), therefore it only has + # attributes that the ndarray.__new__ constructor gave it - + # i.e. those of a standard ndarray. + # + # We could have got to the ndarray.__new__ call in 3 ways: + # From an explicit constructor - e.g. 
InfoArray(): + # obj is None + # (we're in the middle of the InfoArray.__new__ + # constructor, and self.info will be set when we return to + # InfoArray.__new__) + if obj is None: return + # From view casting - e.g arr.view(InfoArray): + # obj is arr + # (type(obj) can be InfoArray) + # From new-from-template - e.g infoarr[:3] + # type(obj) is InfoArray + # + # Note that it is here, rather than in the __new__ method, + # that we set the default value for 'info', because this + # method sees all creation of default objects - with the + # InfoArray.__new__ constructor, but also with + # arr.view(InfoArray). self.info = getattr(obj, 'info', None) # We do not need to return anything - obj = InfoArray(shape=(3,), info='information') - print type(obj) - print obj.info - v = obj[1:] - print type(v) - print v.info -which gives:: - - <class '__main__.InfoArray'> - information - <class '__main__.InfoArray'> - information - -This class isn't very useful, because it has the same constructor as -the bare ndarray object, including passing in buffers and shapes and -so on. We would probably prefer to be able to take an already formed -ndarray from the usual numpy calls to ``np.array`` and return an +Using the object looks like this: + + >>> obj = InfoArray(shape=(3,)) # explicit constructor + >>> type(obj) + <class 'InfoArray'> + >>> obj.info is None + True + >>> obj = InfoArray(shape=(3,), info='information') + >>> obj.info + 'information' + >>> v = obj[1:] # new-from-template - here - slicing + >>> type(v) + <class 'InfoArray'> + >>> v.info + 'information' + >>> arr = np.arange(10) + >>> cast_arr = arr.view(InfoArray) # view casting + >>> type(cast_arr) + <class 'InfoArray'> + >>> cast_arr.info is None + True + +This class isn't very useful, because it has the same constructor as the +bare ndarray object, including passing in buffers and shapes and so on. +We would probably prefer the constructor to be able to take an already +formed ndarray from the usual numpy calls to ``np.array`` and return an object. Slightly more realistic example - attribute added to existing array ------------------------------------------------------------------- -Here is a class (with thanks to Pierre GM for the original example), -that takes array that already exists, casts as our type, and adds an -extra attribute:: + +Here is a class that takes a standard ndarray that already exists, casts +as our type, and adds an extra attribute. + +.. testcode:: import numpy as np @@ -209,77 +398,154 @@ extra attribute:: # Finally, we must return the newly created object: return obj - def __array_finalize__(self,obj): - # reset the attribute from passed original object + def __array_finalize__(self, obj): + # see InfoArray.__array_finalize__ for comments + if obj is None: return self.info = getattr(obj, 'info', None) - # We do not need to return anything - arr = np.arange(5) - obj = RealisticInfoArray(arr, info='information') - print type(obj) - print obj.info - v = obj[1:] - print type(v) - print v.info -which gives:: +So: - <class '__main__.RealisticInfoArray'> - information - <class '__main__.RealisticInfoArray'> - information + >>> arr = np.arange(5) + >>> obj = RealisticInfoArray(arr, info='information') + >>> type(obj) + <class 'RealisticInfoArray'> + >>> obj.info + 'information' + >>> v = obj[1:] + >>> type(v) + <class 'RealisticInfoArray'> + >>> v.info + 'information' + +.. 
_array-wrap: ``__array_wrap__`` for ufuncs ------------------------------ +------------------------------------------------------- -Let's say you have an instance ``obj`` of your new subclass, -``RealisticInfoArray``, and you pass it into a ufunc with another -array:: +``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy +functions, to allow a subclass to set the type of the return value +and update attributes and metadata. Let's show how this works with an example. +First we make the same subclass as above, but with a different name and +some print statements: - arr = np.arange(5) - ret = np.multiply.outer(arr, obj) +.. testcode:: -When a numpy ufunc is called on a subclass of ndarray, the -__array_wrap__ method is called to transform the result into a new -instance of the subclass. By default, __array_wrap__ will call -__array_finalize__, and the attributes will be inherited. + import numpy as np -By defining a specific __array_wrap__ method for our subclass, we can -tweak the output. The __array_wrap__ method requires one argument, the -object on which the ufunc is applied, and an optional parameter -*context*. This parameter is returned by some ufuncs as a 3-element -tuple: (name of the ufunc, argument of the ufunc, domain of the -ufunc). See the masked array subclass for an implementation. + class MySubClass(np.ndarray): -Extra gotchas - custom __del__ methods and ndarray.base -------------------------------------------------------- -One of the problems that ndarray solves is that of memory ownership of -ndarrays and their views. Consider the case where we have created an -ndarray, ``arr`` and then taken a view with ``v = arr[1:]``. If we -then do ``del v``, we need to make sure that the ``del`` does not -delete the memory pointed to by the view, because we still need it for -the original ``arr`` object. Numpy therefore keeps track of where the -data came from for a particular array or view, with the ``base`` attribute:: + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj - import numpy as np + def __array_finalize__(self, obj): + print 'In __array_finalize__:' + print ' self is %s' % repr(self) + print ' obj is %s' % repr(obj) + if obj is None: return + self.info = getattr(obj, 'info', None) - # A normal ndarray, that owns its own data - arr = np.zeros((4,)) - # In this case, base is None - assert arr.base is None - # We take a view - v1 = arr[1:] - # base now points to the array that it derived from - assert v1.base is arr - # Take a view of a view - v2 = v1[1:] - # base points to the view it derived from - assert v2.base is v1 - -The assertions all succeed in this case. In general, if the array -owns its own memory, as for ``arr`` in this case, then ``arr.base`` -will be None - there are some exceptions to this - see the numpy book -for more details. 
+ def __array_wrap__(self, out_arr, context=None): + print 'In __array_wrap__:' + print ' self is %s' % repr(self) + print ' arr is %s' % repr(out_arr) + # then just call the parent + return np.ndarray.__array_wrap__(self, out_arr, context) + +We run a ufunc on an instance of our new array: + +>>> obj = MySubClass(np.arange(5), info='spam') +In __array_finalize__: + self is MySubClass([0, 1, 2, 3, 4]) + obj is array([0, 1, 2, 3, 4]) +>>> arr2 = np.arange(5)+1 +>>> ret = np.add(arr2, obj) +In __array_wrap__: + self is MySubClass([0, 1, 2, 3, 4]) + arr is array([1, 3, 5, 7, 9]) +In __array_finalize__: + self is MySubClass([1, 3, 5, 7, 9]) + obj is MySubClass([0, 1, 2, 3, 4]) +>>> ret +MySubClass([1, 3, 5, 7, 9]) +>>> ret.info +'spam' + +Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the +input with the highest ``__array_priority__`` value, in this case +``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and +``out_arr`` as the (ndarray) result of the addition. In turn, the +default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the +result to class ``MySubClass``, and called ``__array_finalize__`` - +hence the copying of the ``info`` attribute. This has all happened at the C level. + +But, we could do anything we wanted: + +.. testcode:: + + class SillySubClass(np.ndarray): + + def __array_wrap__(self, arr, context=None): + return 'I lost your data' + +>>> arr1 = np.arange(5) +>>> obj = arr1.view(SillySubClass) +>>> arr2 = np.arange(5) +>>> ret = np.multiply(obj, arr2) +>>> ret +'I lost your data' + +So, by defining a specific ``__array_wrap__`` method for our subclass, +we can tweak the output from ufuncs. The ``__array_wrap__`` method +requires ``self``, then an argument - which is the result of the ufunc - +and an optional parameter *context*. This parameter is returned by some +ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc, +domain of the ufunc). ``__array_wrap__`` should return an instance of +its containing class. See the masked array subclass for an +implementation. + +In addition to ``__array_wrap__``, which is called on the way out of the +ufunc, there is also an ``__array_prepare__`` method which is called on +the way into the ufunc, after the output arrays are created but before any +computation has been performed. The default implementation does nothing +but pass through the array. ``__array_prepare__`` should not attempt to +access the array data or resize the array, it is intended for setting the +output array type, updating attributes and metadata, and performing any +checks based on the input that may be desired before computation begins. +Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or +subclass thereof or raise an error. + +Extra gotchas - custom ``__del__`` methods and ndarray.base +----------------------------------------------------------- + +One of the problems that ndarray solves is keeping track of memory +ownership of ndarrays and their views. Consider the case where we have +created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``. +The two objects are looking at the same memory. 
Numpy keeps track of +where the data came from for a particular array or view, with the +``base`` attribute: + +>>> # A normal ndarray, that owns its own data +>>> arr = np.zeros((4,)) +>>> # In this case, base is None +>>> arr.base is None +True +>>> # We take a view +>>> v1 = arr[1:] +>>> # base now points to the array that it derived from +>>> v1.base is arr +True +>>> # Take a view of a view +>>> v2 = v1[1:] +>>> # base points to the view it derived from +>>> v2.base is v1 +True + +In general, if the array owns its own memory, as for ``arr`` in this +case, then ``arr.base`` will be None - there are some exceptions to this +- see the numpy book for more details. The ``base`` attribute is useful in being able to tell whether we have a view or the original array. This in turn can be useful if we need @@ -289,4 +555,5 @@ the original array is deleted, but not the views. For an example of how this can work, have a look at the ``memmap`` class in ``numpy.core``. + """ diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 8f4b979c5..9901eb11d 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1942,9 +1942,9 @@ def _kind_func(string): if string[0] in "'\"": string = string[1:-1] if real16pattern.match(string): - return 16 - elif real8pattern.match(string): return 8 + elif real8pattern.match(string): + return 4 return 'kind('+string+')' def _selected_int_kind_func(r): diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py index 5fd56246e..fd973a123 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/fftpack.py @@ -141,7 +141,6 @@ def fft(a, n=None, axis=-1): 1.14383329e-17 +1.22460635e-16j, -1.64863782e-15 +1.77635684e-15j]) - >>> from numpy.fft import fft, fftfreq >>> import matplotlib.pyplot as plt >>> t = np.arange(256) @@ -591,7 +590,6 @@ def fftn(a, s=None, axes=None): [[-2.+0.j, -2.+0.j, -2.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - >>> from numpy import meshgrid, pi, arange, sin, cos, log, abs >>> from numpy.fft import fftn, fftshift >>> import matplotlib.pyplot as plt diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index c5e7822f2..b8ae9a9f3 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -3,40 +3,36 @@ Set operations for 1D numeric arrays based on sorting. :Contains: ediff1d, - unique1d, + unique, intersect1d, - intersect1d_nu, setxor1d, - setmember1d, - setmember1d_nu, + in1d, union1d, setdiff1d +:Deprecated: + unique1d, + intersect1d_nu, + setmember1d + :Notes: -All functions work best with integer numerical arrays on input (e.g. indices). -For floating point arrays, innacurate results may appear due to usual round-off +For floating point arrays, inaccurate results may appear due to usual round-off and floating point comparison issues. -Except unique1d, union1d and intersect1d_nu, all functions expect inputs with -unique elements. Speed could be gained in some operations by an implementaion of -sort(), that can provide directly the permutation vectors, avoiding thus calls -to argsort(). +Speed could be gained in some operations by an implementation of +sort(), that can provide directly the permutation vectors, avoiding +thus calls to argsort(). -Run _test_unique1d_speed() to compare performance of numpy.unique1d() and -numpy.unique() - it should be the same. - -To do: Optionally return indices analogously to unique1d for all functions. - -created: 01.11.2005 -last revision: 07.01.2007 +To do: Optionally return indices analogously to unique for all functions. 
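The renamed membership test behaves like the old ``setmember1d`` (a quick doctest sketch; the boolean repr follows the 1.x style)::

    >>> np.in1d(np.arange(5), [0, 2])
    array([ True, False,  True, False, False], dtype=bool)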
:Author: Robert Cimrman """ __all__ = ['ediff1d', 'unique1d', 'intersect1d', 'intersect1d_nu', 'setxor1d', - 'setmember1d', 'setmember1d_nu', 'union1d', 'setdiff1d'] + 'setmember1d', 'union1d', 'setdiff1d', 'unique', 'in1d'] import numpy as np +from numpy.lib.utils import deprecate_with_doc def ediff1d(ary, to_end=None, to_begin=None): """ @@ -50,7 +46,7 @@ def ediff1d(ary, to_end=None, to_begin=None): If provided, this number will be tacked onto the end of the returned differences. to_begin : number, optional - If provided, this number will be taked onto the beginning of the + If provided, this number will be tacked onto the beginning of the returned differences. Returns @@ -73,26 +69,26 @@ def ediff1d(ary, to_end=None, to_begin=None): arrays.append(to_end) if len(arrays) != 1: - # We'll save ourselves a copy of a potentially large array in the common - # case where neither to_begin or to_end was given. + # We'll save ourselves a copy of a potentially large array in + # the common case where neither to_begin or to_end was given. ed = np.hstack(arrays) return ed -def unique1d(ar1, return_index=False, return_inverse=False): +def unique(ar, return_index=False, return_inverse=False): """ Find the unique elements of an array. Parameters ---------- - ar1 : array_like + ar : array_like This array will be flattened if it is not already 1-D. return_index : bool, optional If True, also return the indices against `ar1` that result in the unique array. return_inverse : bool, optional If True, also return the indices against the unique array that - result in `ar1`. + result in `ar`. Returns ------- @@ -112,17 +108,17 @@ def unique1d(ar1, return_index=False, return_inverse=False): Examples -------- - >>> np.unique1d([1, 1, 2, 2, 3, 3]) + >>> np.unique([1, 1, 2, 2, 3, 3]) array([1, 2, 3]) >>> a = np.array([[1, 1], [2, 3]]) - >>> np.unique1d(a) + >>> np.unique(a) array([1, 2, 3]) Reconstruct the input from unique values: - >>> np.unique1d([1,2,6,4,2,3,2], return_index=True) + >>> np.unique([1,2,6,4,2,3,2], return_index=True) >>> x = [1,2,6,4,2,3,2] - >>> u, i = np.unique1d(x, return_inverse=True) + >>> u, i = np.unique(x, return_inverse=True) >>> u array([1, 2, 3, 4, 6]) >>> i @@ -131,14 +127,15 @@ def unique1d(ar1, return_index=False, return_inverse=False): [1, 2, 6, 4, 2, 3, 2] """ - if return_index: - import warnings - warnings.warn("The order of the output arguments for " - "`return_index` has changed. Before, " - "the output was (indices, unique_arr), but " - "has now been reversed to be more consistent.") + try: + ar = ar.flatten() + except AttributeError: + if not return_inverse and not return_index: + items = sorted(set(ar)) + return np.asarray(items) + else: + ar = np.asanyarray(ar).flatten() - ar = np.asanyarray(ar1).flatten() if ar.size == 0: if return_inverse and return_index: return ar, np.empty(0, np.bool), np.empty(0, np.bool) @@ -166,44 +163,18 @@ def unique1d(ar1, return_index=False, return_inverse=False): flag = np.concatenate(([True], ar[1:] != ar[:-1])) return ar[flag] -def intersect1d(ar1, ar2): - """ - Intersection returning repeated or unique elements common to both arrays. - - Parameters - ---------- - ar1,ar2 : array_like - Input arrays. - - Returns - ------- - out : ndarray, shape(N,) - Sorted 1D array of common elements with repeating elements. - - See Also - -------- - intersect1d_nu : Returns only unique common elements. - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. 
- - Examples - -------- - >>> np.intersect1d([1,3,3],[3,1,1]) - array([1, 1, 3, 3]) - - """ - aux = np.concatenate((ar1,ar2)) - aux.sort() - return aux[aux[1:] == aux[:-1]] -def intersect1d_nu(ar1, ar2): +def intersect1d(ar1, ar2, assume_unique=False): """ Intersection returning unique elements common to both arrays. Parameters ---------- - ar1,ar2 : array_like + ar1, ar2 : array_like Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. Returns ------- @@ -212,34 +183,34 @@ def intersect1d_nu(ar1, ar2): See Also -------- - intersect1d : Returns repeated or unique common elements. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Examples -------- - >>> np.intersect1d_nu([1,3,3],[3,1,1]) + >>> np.intersect1d([1,3,3], [3,1,1]) array([1, 3]) """ - # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? - aux = np.concatenate((unique1d(ar1), unique1d(ar2))) + if not assume_unique: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + ar1 = unique(ar1) + ar2 = unique(ar2) + aux = np.concatenate( (ar1, ar2) ) aux.sort() return aux[aux[1:] == aux[:-1]] -def setxor1d(ar1, ar2): +def setxor1d(ar1, ar2, assume_unique=False): """ - Set exclusive-or of 1D arrays with unique elements. - - Use unique1d() to generate arrays with only unique elements to use as - inputs to this function. + Set exclusive-or of two 1D arrays. Parameters ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input array. + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. Returns ------- @@ -252,7 +223,11 @@ def setxor1d(ar1, ar2): performing set operations on arrays. """ - aux = np.concatenate((ar1, ar2)) + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate( (ar1, ar2) ) if aux.size == 0: return aux @@ -263,98 +238,68 @@ def setxor1d(ar1, ar2): flag2 = flag[1:] == flag[:-1] return aux[flag2] -def setmember1d(ar1, ar2): +def in1d(ar1, ar2, assume_unique=False): """ - Return a boolean array set True where first element is in second array. - - Boolean array is the shape of `ar1` containing True where the elements - of `ar1` are in `ar2` and False otherwise. + Test whether each element of an array is also present in a second array. - Use unique1d() to generate arrays with only unique elements to use as - inputs to this function. + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. Parameters ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input array. + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. Returns ------- mask : ndarray, bool The values `ar1[mask]` are in `ar2`. - See Also -------- - setmember1d_nu : Works for arrays with non-unique elements. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. + Notes + ----- + .. 
versionadded:: 1.4.0 + Examples -------- >>> test = np.arange(5) >>> states = [0, 2] - >>> mask = np.setmember1d(test,states) + >>> mask = np.setmember1d(test, states) >>> mask array([ True, False, True, False, False], dtype=bool) >>> test[mask] array([0, 2]) """ - # We need this to be a stable sort, so always use 'mergesort' here. The - # values from the first array should always come before the values from the - # second array. - ar = np.concatenate( (ar1, ar2 ) ) + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate( (ar1, ar2) ) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] equal_adj = (sar[1:] == sar[:-1]) flag = np.concatenate( (equal_adj, [False] ) ) - indx = order.argsort(kind='mergesort')[:len( ar1 )] - return flag[indx] - -def setmember1d_nu(ar1, ar2): - """ - Return a boolean array set True where first element is in second array. - - Boolean array is the shape of `ar1` containing True where the elements - of `ar1` are in `ar2` and False otherwise. - - Unlike setmember1d(), this version works also for arrays with duplicate - values. It uses setmember1d() internally. For arrays with unique - entries it is slower than calling setmember1d() directly. - - Parameters - ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input array. - Returns - ------- - mask : ndarray, bool - The values `ar1[mask]` are in `ar2`. - - See Also - -------- - setmember1d : Faster for arrays with unique elements. - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - """ - unique_ar1, rev_idx = np.unique1d(ar1, return_inverse=True) - mask = np.setmember1d(unique_ar1, np.unique1d(ar2)) - return mask[rev_idx] + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] def union1d(ar1, ar2): """ - Union of 1D arrays with unique elements. - - Use unique1d() to generate arrays with only unique elements to use as - inputs to this function. + Union of two 1D arrays. Parameters ---------- @@ -374,14 +319,11 @@ def union1d(ar1, ar2): performing set operations on arrays. """ - return unique1d( np.concatenate( (ar1, ar2) ) ) + return unique( np.concatenate( (ar1, ar2) ) ) -def setdiff1d(ar1, ar2): +def setdiff1d(ar1, ar2, assume_unique=False): """ - Set difference of 1D arrays with unique elements. - - Use unique1d() to generate arrays with only unique elements to use as - inputs to this function. + Set difference of two 1D arrays. Parameters ---------- @@ -389,6 +331,9 @@ def setdiff1d(ar1, ar2): Input array. ar2 : array_like Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. Returns ------- @@ -401,8 +346,80 @@ def setdiff1d(ar1, ar2): performing set operations on arrays. """ - aux = setmember1d(ar1,ar2) + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + aux = in1d(ar1, ar2, assume_unique=True) if aux.size == 0: return aux else: return np.asarray(ar1)[aux == 0] + +@deprecate_with_doc('') +def unique1d(ar1, return_index=False, return_inverse=False): + """ + This function is deprecated. Use unique() instead. + """ + if return_index: + import warnings + warnings.warn("The order of the output arguments for " + "`return_index` has changed. 
Before, " + "the output was (indices, unique_arr), but " + "has now been reversed to be more consistent.") + + ar = np.asanyarray(ar1).flatten() + if ar.size == 0: + if return_inverse and return_index: + return ar, np.empty(0, np.bool), np.empty(0, np.bool) + elif return_inverse or return_index: + return ar, np.empty(0, np.bool) + else: + return ar + + if return_inverse or return_index: + perm = ar.argsort() + aux = ar[perm] + flag = np.concatenate(([True], aux[1:] != aux[:-1])) + if return_inverse: + iflag = np.cumsum(flag) - 1 + iperm = perm.argsort() + if return_index: + return aux[flag], perm[flag], iflag[iperm] + else: + return aux[flag], iflag[iperm] + else: + return aux[flag], perm[flag] + + else: + ar.sort() + flag = np.concatenate(([True], ar[1:] != ar[:-1])) + return ar[flag] + +@deprecate_with_doc('') +def intersect1d_nu(ar1, ar2): + """ + This function is deprecated. Use intersect1d() + instead. + """ + # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? + aux = np.concatenate((unique1d(ar1), unique1d(ar2))) + aux.sort() + return aux[aux[1:] == aux[:-1]] + +@deprecate_with_doc('') +def setmember1d(ar1, ar2): + """ + This function is deprecated. Use in1d(assume_unique=True) + instead. + """ + # We need this to be a stable sort, so always use 'mergesort' here. The + # values from the first array should always come before the values from the + # second array. + ar = np.concatenate( (ar1, ar2 ) ) + order = ar.argsort(kind='mergesort') + sar = ar[order] + equal_adj = (sar[1:] == sar[:-1]) + flag = np.concatenate( (equal_adj, [False] ) ) + + indx = order.argsort(kind='mergesort')[:len( ar1 )] + return flag[indx] diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index 0cef1c4d2..503d43647 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -28,6 +28,18 @@ def fv(rate, nper, pmt, pv, when='end'): """ Compute the future value. + Given: + * a present value, `pv` + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * a (fixed) payment, `pmt`, paid either + * at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the value at the end of the `nper` periods + Parameters ---------- rate : scalar or array_like of shape(M, ) @@ -61,6 +73,17 @@ def fv(rate, nper, pmt, pv, when='end'): fv + pv + pmt * nper == 0 + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + Examples -------- What is the future value after 10 years of saving $100 now, with @@ -94,6 +117,19 @@ def pmt(rate, nper, pv, fv=0, when='end'): """ Compute the payment against loan principal plus interest. + Given: + * a present value, `pv` (e.g., an amount borrowed) + * a future value, `fv` (e.g., 0) + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * and (optional) specification of whether payment is made + at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the (fixed) periodic payment. 
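The ``Given``/``Return`` summary above amounts to solving the annuity
equation from the Notes for ``pmt``. A minimal sketch for the default
``when='end'`` case only, checked against ``np.pmt`` (variable names are
illustrative)::

    import numpy as np

    rate, nper, pv, fv = 0.075/12, 12*15, 200000, 0
    temp = (1 + rate)**nper
    # Notes equation solved for pmt when payments fall at period end:
    by_hand = -(fv + pv*temp) * rate / (temp - 1)
    print by_hand                  # ~ -1854.02
    print np.pmt(rate, nper, pv)   # should agree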
+ Parameters ---------- rate : array_like @@ -102,8 +138,8 @@ def pmt(rate, nper, pv, fv=0, when='end'): Number of compounding periods pv : array_like Present value - fv : array_like - Future value + fv : array_like (optional) + Future value (default = 0) when : {{'begin', 1}, {'end', 0}}, {string, int} When payments are due ('begin' (1) or 'end' (0)) @@ -117,7 +153,7 @@ def pmt(rate, nper, pv, fv=0, when='end'): Notes ----- - The payment ``pmt`` is computed by solving the equation:: + The payment is computed by solving the equation:: fv + pv*(1 + rate)**nper + @@ -127,16 +163,37 @@ def pmt(rate, nper, pv, fv=0, when='end'): fv + pv + pmt * nper == 0 + for ``pmt``. + + Note that computing a monthly mortgage payment is only + one use for this function. For example, pmt returns the + periodic deposit one must make to achieve a specified + future balance given an initial deposit, a fixed, + periodically compounded interest rate, and the total + number of periods. + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + Examples -------- - What would the monthly payment need to be to pay off a $200,000 loan in 15 + What is the monthly payment needed to pay off a $200,000 loan in 15 years at an annual interest rate of 7.5%? >>> np.pmt(0.075/12, 12*15, 200000) -1854.0247200054619 - In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained - today, a monthly payment of $1,854.02 would be required. + In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained + today, a monthly payment of $1,854.02 would be required. Note that this + example illustrates usage of `fv` having a default value of 0. """ when = _convert_when(when) @@ -282,6 +339,18 @@ def pv(rate, nper, pmt, fv=0.0, when='end'): """ Compute the present value. + Given: + * a future value, `fv` + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * a (fixed) payment, `pmt`, paid either + * at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the value now + Parameters ---------- rate : array_like @@ -302,7 +371,7 @@ def pv(rate, nper, pmt, fv=0.0, when='end'): Notes ----- - The present value ``pv`` is computed by solving the equation:: + The present value is computed by solving the equation:: fv + pv*(1 + rate)**nper + @@ -312,6 +381,45 @@ def pv(rate, nper, pmt, fv=0.0, when='end'): fv + pv + pmt * nper = 0 + for `pv`, which is then returned. + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. 
+ Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + Examples + -------- + What is the present value (e.g., the initial investment) + of an investment that needs to total $15692.93 + after 10 years of saving $100 every month? Assume the + interest rate is 5% (annually) compounded monthly. + + >>> np.pv(0.05/12, 10*12, -100, 15692.93) + -100.00067131625819 + + By convention, the negative sign represents cash flow out + (i.e., money not available today). Thus, to end up with + $15,692.93 in 10 years saving $100 a month at 5% annual + interest, one's initial deposit should also be $100. + + If any input is array_like, ``pv`` returns an array of equal shape. + Let's compare different interest rates in the example above: + + >>> a = np.array((0.05, 0.04, 0.03))/12 + >>> np.pv(a, 10*12, -100, 15692.93) + array([ -100.00067132, -649.26771385, -1273.78633713]) + + So, to end up with the same $15692.93 under the same $100 per month + "savings plan," for annual interest rates of 4% and 3%, one would + need initial investments of $649.27 and $1273.79, respectively. + """ when = _convert_when(when) rate, nper, pmt, fv, when = map(np.asarray, [rate, nper, pmt, fv, when]) @@ -391,24 +499,54 @@ def irr(values): """ Return the Internal Rate of Return (IRR). - This is the rate of return that gives a net present value of 0.0. + This is the "average" periodically compounded rate of return + that gives a net present value of 0.0; for a more complete explanation, + see Notes below. Parameters ---------- values : array_like, shape(N,) - Input cash flows per time period. At least the first value would be - negative to represent the investment in the project. + Input cash flows per time period. By convention, net "deposits" + are negative and net "withdrawals" are positive. Thus, for example, + at least the first element of `values`, which represents the initial + investment, will typically be negative. Returns ------- out : float Internal Rate of Return for periodic input values. + Notes + ----- + The IRR is perhaps best understood through an example (illustrated + using np.irr in the Examples section below). Suppose one invests + 100 units and then makes the following withdrawals at regular + (fixed) intervals: 39, 59, 55, 20. Assuming the ending value is 0, + one's 100 unit investment yields 173 units; however, due to the + combination of compounding and the periodic withdrawals, the + "average" rate of return is neither simply 0.73/4 nor (1.73)^0.25-1. + Rather, it is the solution (for :math:`r`) of the equation: + + .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} + + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 + + In general, for `values` :math:`= [v_0, v_1, ... v_M]`, + irr is the solution of the equation: [G]_ + + .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 + + References + ---------- + .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + Addison-Wesley, 2003, pg. 348. + Examples -------- >>> np.irr([-100, 39, 59, 55, 20]) 0.2809484211599611 + (Compare with the Example given for numpy.lib.financial.npv) + """ res = np.roots(values[::-1]) # Find the root(s) between 0 and 1 @@ -430,8 +568,14 @@ def npv(rate, values): rate : scalar The discount rate. values : array_like, shape(M, ) - The values of the time series of cash flows. Must be the same - increment as the `rate`. + The values of the time series of cash flows. 
The (fixed) time + interval between cash flow "events" must be the same as that + for which `rate` is given (i.e., if `rate` is per year, then + precisely a year is understood to elapse between each cash flow + event). By convention, investments or "deposits" are negative, + income or "withdrawals" are positive; `values` must begin with + the initial investment, thus `values[0]` will typically be + negative. Returns ------- @@ -440,9 +584,21 @@ def npv(rate, values): Notes ----- - Returns the result of: + Returns the result of: [G]_ + + .. math :: \\sum_{t=0}^M{\\frac{values_t}{(1+rate)^{t}}} + + References + ---------- + .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + Addison-Wesley, 2003, pg. 346. - .. math :: \\sum_{t=1}^M{\\frac{values_t}{(1+rate)^{t}}} + Examples + -------- + >>> np.npv(0.281,[-100, 39, 59, 55, 20]) + -0.0066187288356340801 + + (Compare with the Example given for numpy.lib.financial.irr) """ values = np.asarray(values) @@ -456,7 +612,7 @@ def mirr(values, finance_rate, reinvest_rate): ---------- values : array_like Cash flows (must contain at least one positive and one negative value) - or nan is returned. + or nan is returned. The first value is considered a sunk cost at time zero. finance_rate : scalar Interest rate paid on the cash flows reinvest_rate : scalar @@ -469,13 +625,13 @@ def mirr(values, finance_rate, reinvest_rate): """ - values = np.asarray(values) + values = np.asarray(values, dtype=np.double) + n = values.size pos = values > 0 neg = values < 0 - if not (pos.size > 0 and neg.size > 0): + if not (pos.any() and neg.any()): return np.nan + numer = np.abs(npv(reinvest_rate, values*pos))*(1 + reinvest_rate) + denom = np.abs(npv(finance_rate, values*neg))*(1 + finance_rate) + return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1 - n = pos.size + neg.size - numer = -npv(reinvest_rate, values[pos])*((1+reinvest_rate)**n) - denom = npv(finance_rate, values[neg])*(1+finance_rate) - return (numer / denom)**(1.0/(n-1)) - 1 diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index b493801df..663c3d2ef 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3,7 +3,7 @@ __all__ = ['logspace', 'linspace', 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', - 'unique', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax', + 'extract', 'place', 'nansum', 'nanmax', 'nanargmax', 'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', @@ -28,6 +28,7 @@ from numpy.lib.twodim_base import diag from _compiled_base import _insert, add_docstring from _compiled_base import digitize, bincount, interp as compiled_interp from arraysetops import setdiff1d +from utils import deprecate_with_doc import numpy as np #end Fernando's utilities @@ -377,11 +378,11 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None): n = np.diff(n) - if normed is False: - return n, bins - elif normed is True: + if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins + else: + return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): @@ -717,9 +718,9 @@ def piecewise(x, condlist, funclist, *args, **kw): Parameters ---------- - x : (N,) ndarray + x : ndarray The input domain. 
- condlist : list of M (N,)-shaped boolean arrays + condlist : list of bool arrays Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. @@ -727,24 +728,24 @@ def piecewise(x, condlist, funclist, *args, **kw): and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. - If one extra function is given, i.e. if the length of `funclist` is - M+1, then that extra function is the default value, used wherever - all conditions are false. - funclist : list of M or M+1 callables, f(x,*args,**kw), or values + If one extra function is given, i.e. if + ``len(funclist) - len(condlist) == 1``, then that extra function + is the default value, used wherever all conditions are false. + funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, - a value is provided then a constant function (``lambda x: value``) is + a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions - upon execution, i.e., if called ``piecewise(...,...,1,'a')``, then - each function is called as ``f(x,1,'a')``. - kw : dictionary, optional + upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then + each function is called as ``f(x, 1, 'a')``. + kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called - ``piecewise(...,...,lambda=1)``, then each function is called as - ``f(x,lambda=1)``. + ``piecewise(..., ..., lambda=1)``, then each function is called as + ``f(x, lambda=1)``. Returns ------- @@ -754,6 +755,11 @@ def piecewise(x, condlist, funclist, *args, **kw): as defined by the boolean arrays in `condlist`. Portions not covered by any condition have undefined values. + + See Also + -------- + choose, select, where + Notes ----- This is similar to choose or select, except that functions are @@ -773,8 +779,8 @@ def piecewise(x, condlist, funclist, *args, **kw): -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. - >>> x = np.arange(6) - 2.5 # x runs from -2.5 to 2.5 in steps of 1 - >>> np.piecewise(x, [x < 0, x >= 0.5], [-1,1]) + >>> x = np.arange(6) - 2.5 + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for @@ -836,39 +842,35 @@ def select(condlist, choicelist, default=0): Parameters ---------- - condlist : list of N boolean arrays of length M - The conditions C_0 through C_(N-1) which determine - from which vector the output elements are taken. - choicelist : list of N arrays of length M - Th vectors V_0 through V_(N-1), from which the output - elements are chosen. + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. 
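To make the ``default`` argument concrete, a sketch extending the docstring
example (elements satisfying no condition receive the default)::

    >>> x = np.arange(10)
    >>> np.select([x < 3, x > 5], [x, x**2], default=-1)
    array([ 0,  1,  2, -1, -1, -1, 36, 49, 64, 81])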
Returns ------- - output : 1-dimensional array of length M - The output at position m is the m-th element of the first - vector V_n for which C_n[m] is non-zero. Note that the - output depends on the order of conditions, since the - first satisfied condition is used. - - Notes - ----- - Equivalent to: - :: + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. - output = [] - for m in range(M): - output += [V[m] for V,C in zip(values,cond) if C[m]] - or [default] + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal Examples -------- - >>> t = np.arange(10) - >>> s = np.arange(10)*100 - >>> condlist = [t == 4, t > 5] - >>> choicelist = [s, t] + >>> x = np.arange(10) + >>> condlist = [x<3, x>5] + >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) - array([ 0, 0, 0, 0, 400, 0, 6, 7, 8, 9]) + array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ n = len(condlist) @@ -960,11 +962,17 @@ def gradient(f, *varargs): Examples -------- - >>> np.gradient(np.array([[1,1],[3,4]])) - [array([[ 2., 3.], - [ 2., 3.]]), - array([[ 0., 0.], - [ 1., 1.]])] + >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) + >>> np.gradient(x) + array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(x, 2) + array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) + [array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[ 1. , 2.5, 4. ], + [ 1. , 1. , 1. ]])] """ N = len(f.shape) # number of dimensions @@ -1026,7 +1034,11 @@ def gradient(f, *varargs): def diff(a, n=1, axis=-1): """ - Calculate the nth order discrete difference along given axis. + Calculate the n-th order discrete difference along given axis. + + The first order difference is given by ``out[n] = a[n+1] - a[n]`` along + the given axis, higher order differences are calculated by using `diff` + recursively. Parameters ---------- @@ -1035,26 +1047,31 @@ def diff(a, n=1, axis=-1): n : int, optional The number of times values are differenced. axis : int, optional - The axis along which the difference is taken. + The axis along which the difference is taken, default is the last axis. Returns ------- out : ndarray - The `n` order differences. The shape of the output is the same as `a` - except along `axis` where the dimension is `n` less. + The `n` order differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. + + See Also + -------- + gradient, ediff1d Examples -------- - >>> x = np.array([0,1,3,9,5,10]) + >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) - array([ 1, 2, 6, -4, 5]) - >>> np.diff(x,n=2) - array([ 1, 4, -10, 9]) - >>> x = np.array([[1,3,6,10],[0,5,6,8]]) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], - [5, 1, 2]]) - >>> np.diff(x,axis=0) + [5, 1, 2]]) + >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) """ @@ -1201,15 +1218,34 @@ def unwrap(p, discont=pi, axis=-1): ---------- p : array_like Input array. - discont : float - Maximum discontinuity between values. - axis : integer - Axis along which unwrap will operate. + discont : float, optional + Maximum discontinuity between values, default is ``pi``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. 
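The recursive definition stated in the reworked ``diff`` docstring can be
verified directly; a small sketch::

    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(np.diff(x))   # applying diff twice ...
    array([  1,   1, -10])
    >>> np.diff(x, n=2)       # ... is exactly what n=2 computes
    array([  1,   1, -10])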
Returns ------- out : ndarray - Output array + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``pi``, but larger than + `discont`, no unwrapping is done because taking the 2*pi complement + would only make the discontinuity larger. + + Examples + -------- + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) @@ -1310,31 +1346,11 @@ import sys if sys.hexversion < 0x2040000: from sets import Set as set +@deprecate_with_doc('') def unique(x): """ - Return the sorted, unique elements of an array or sequence. - - Parameters - ---------- - x : ndarray or sequence - Input array. - - Returns - ------- - y : ndarray - The sorted, unique elements are returned in a 1-D array. - - Examples - -------- - >>> np.unique([1, 1, 2, 2, 3, 3]) - array([1, 2, 3]) - >>> a = np.array([[1, 1], [2, 3]]) - >>> np.unique(a) - array([1, 2, 3]) - - >>> np.unique([True, True, False]) - array([False, True], dtype=bool) - + This function is deprecated. Use numpy.lib.arraysetops.unique() + instead. """ try: tmp = x.flatten() @@ -1365,53 +1381,64 @@ def extract(condition, arr): See Also -------- - take, put, putmask + take, put, putmask, compress Examples -------- - >>> arr = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]]) + >>> arr = np.arange(12).reshape((3, 4)) >>> arr - array([[ 1, 2, 3, 4], - [ 5, 6, 7, 8], - [ 9, 10, 11, 12]]) + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition - array([[False, False, True, False], - [False, True, False, False], - [ True, False, False, True]], dtype=bool) + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) - array([ 3, 6, 9, 12]) + array([0, 3, 6, 9]) + If `condition` is boolean: >>> arr[condition] - array([ 3, 6, 9, 12]) + array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ - Changes elements of an array based on conditional and input values. + Change elements of an array based on conditional and input values. - Similar to ``putmask(a, mask, vals)`` but the 1D array `vals` has the - same number of elements as the non-zero values of `mask`. Inverse of - ``extract``. + Similar to ``np.putmask(a, mask, vals)``, the difference is that `place` + uses the first N elements of `vals`, where N is the number of True values + in `mask`, while `putmask` uses the elements where `mask` is True. - Sets `a`.flat[n] = `values`\\[n] for each n where `mask`.flat[n] is true. + Note that `extract` does the exact opposite of `place`. Parameters ---------- a : array_like Array to put data into. mask : array_like - Boolean mask array. - values : array_like, shape(number of non-zero `mask`, ) - Values to put into `a`. + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N it will be repeated. 
See Also -------- - putmask, put, take + putmask, put, take, extract + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> np.place(x, x>2, [44, 55]) + >>> x + array([[ 0, 1, 2], + [44, 55, 44]]) """ return _insert(arr, mask, vals) @@ -2841,6 +2868,25 @@ def trapz(y, x=None, dx=1.0, axis=-1): axis : int, optional Specify the axis. + Returns + ------- + out : float + Definite integral as approximated by trapezoidal rule. + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will + be taken from `y` array, by default x-axis distances between points will be + 1.0, alternatively they can be provided with `x` array or with `dx` scalar. + Return value will be equal to combined area under the red lines. + + + References + ---------- + .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule + + .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + Examples -------- >>> np.trapz([1,2,3]) diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index b8add9ed7..eeb1d37aa 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -3,29 +3,38 @@ __all__ = ['unravel_index', 'ogrid', 'r_', 'c_', 's_', 'index_exp', 'ix_', - 'ndenumerate','ndindex'] + 'ndenumerate','ndindex', + 'fill_diagonal','diag_indices','diag_indices_from'] import sys import numpy.core.numeric as _nx -from numpy.core.numeric import asarray, ScalarType, array +from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod, + arange ) from numpy.core.numerictypes import find_common_type import math import function_base import numpy.core.defmatrix as matrix +from function_base import diff makemat = matrix.matrix # contributed by Stefan van der Walt def unravel_index(x,dims): """ - Convert a flat index into an index tuple for an array of given shape. + Convert a flat index to an index tuple for an array of given shape. Parameters ---------- x : int Flattened index. - dims : shape tuple - Input shape. + dims : tuple of ints + Input shape, the shape of an array into which indexing is + required. + + Returns + ------- + idx : tuple of ints + Tuple of the same shape as `dims`, containing the unraveled index. Notes ----- @@ -34,7 +43,7 @@ def unravel_index(x,dims): Examples -------- - >>> arr = np.arange(20).reshape(5,4) + >>> arr = np.arange(20).reshape(5, 4) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], @@ -72,21 +81,45 @@ def unravel_index(x,dims): return tuple(x/dim_prod % dims) def ix_(*args): - """ Construct an open mesh from multiple sequences. + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. - This function takes n 1-d sequences and returns n outputs with n - dimensions each such that the shape is 1 in all but one dimension and - the dimension with the non-unit shape value cycles through all n - dimensions. + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[a[1,2] a[1,5] a[3,2] a[3,5]]``. - Using ix_() one can quickly construct index arrays that will index - the cross product. 
+ Parameters + ---------- + args : 1-D sequences - a[ix_([1,3,7],[2,5,8])] returns the array + Returns + ------- + out : ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> a = np.arange(10).reshape(2, 5) + >>> ixgrid = np.ix_([0,1], [2,4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> print ixgrid[0].shape, ixgrid[1].shape + (2, 1) (1, 2) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) - a[1,2] a[1,5] a[1,8] - a[3,2] a[3,5] a[3,8] - a[7,2] a[7,5] a[7,8] """ out = [] nd = len(args) @@ -215,7 +248,11 @@ mgrid.__doc__ = None # set in numpy.add_newdocs ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): - """Translates slice objects to concatenation along an axis. + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ def _retval(self, res): if self.matrix: @@ -338,11 +375,96 @@ class AxisConcatenator(object): # in help(r_) class RClass(AxisConcatenator): - """Translates slice objects to concatenation along the first axis. + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. 
Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters - For example: + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays together. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, 0, 0, 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) """ def __init__(self): @@ -351,11 +473,21 @@ class RClass(AxisConcatenator): r_ = RClass() class CClass(AxisConcatenator): - """Translates slice objects to concatenation along the second axis. + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). - For example: + For detailed documentation, see `r_`. + + Examples + -------- >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([1, 2, 3, 0, 0, 4, 5, 6]) + array([[1, 2, 3, 0, 0, 4, 5, 6]]) + """ def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) @@ -373,9 +505,13 @@ class ndenumerate(object): a : ndarray Input array. + See Also + -------- + ndindex, flatiter + Examples -------- - >>> a = np.array([[1,2],[3,4]]) + >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print index, x (0, 0) 1 @@ -388,6 +524,17 @@ class ndenumerate(object): self.iter = asarray(arr).flat def next(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ return self.iter.coords, self.iter.next() def __iter__(self): @@ -399,17 +546,21 @@ class ndindex(object): An N-dimensional iterator object to index arrays. Given the shape of an array, an `ndindex` instance iterates over - the N-dimensional index of the array. At each iteration, the index of the - last dimension is incremented by one. + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. Parameters ---------- - `*args` : integers - The size of each dimension in the counter. + `*args` : ints + The size of each dimension of the array. + + See Also + -------- + ndenumerate, flatiter Examples -------- - >>> for index in np.ndindex(3,2,1): + >>> for index in np.ndindex(3, 2, 1): ... 
print index (0, 0, 0) (0, 1, 0) @@ -442,9 +593,25 @@ class ndindex(object): self._incrementone(axis-1) def ndincr(self): + """ + Increment the multi-dimensional index by one. + + `ndincr` takes care of the "wrapping around" of the axes. + It is called by `ndindex.next` and not normally used directly. + + """ self._incrementone(self.nd-1) def next(self): + """ + Standard iterator method, updates the index and returns the index tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current iteration. + + """ if (self.index >= self.total): raise StopIteration val = tuple(self.ind) @@ -501,3 +668,167 @@ index_exp = IndexExpression(maketuple=True) s_ = IndexExpression(maketuple=False) # End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + +def fill_diagonal(a, val): + """Fill the main diagonal of the given array of any dimensionality. + + For an array with ndim > 2, the diagonal is the list of locations with + indices a[i,i,...,i], all identical. + + This function modifies the input array in-place, it does not return a + value. + + This functionality can be obtained via diag_indices(), but internally this + version uses a much faster implementation that never constructs the indices + and uses simple slicing. + + Parameters + ---------- + a : array, at least 2-dimensional. + Array whose diagonal is to be filled, it gets modified in-place. + + val : scalar + Value to be written on the diagonal, its type must be compatible with + that of the array a. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + >>> a = zeros((3,3),int) + >>> fill_diagonal(a,5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + The same function can operate on a 4-d array: + >>> a = zeros((3,3,3,3),int) + >>> fill_diagonal(a,4) + + We only show a few blocks for clarity: + >>> a[0,0] + array([[4, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + >>> a[1,1] + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 0]]) + >>> a[2,2] + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 4]]) + + """ + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not alltrue(diff(a.shape)==0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[::step] = val + + +def diag_indices(n, ndim=2): + """Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array with ndim (>=2) dimensions and shape (n,n,...,n). For + ndim=2 this is the usual diagonal, for ndim>2 this is the set of indices + to access A[i,i,...,i] for i=[0..n-1]. + + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + Notes + ----- + .. 
versionadded:: 1.4.0 + + See also + -------- + diag_indices_from + + Examples + -------- + Create a set of indices to access the diagonal of a (4,4) array: + >>> di = diag_indices(4) + + >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]) + >>> a + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12], + [13, 14, 15, 16]]) + >>> a[di] = 100 + >>> a + array([[100, 2, 3, 4], + [ 5, 100, 7, 8], + [ 9, 10, 100, 12], + [ 13, 14, 15, 100]]) + + Now, we create indices to manipulate a 3-d array: + >>> d3 = diag_indices(2,3) + + And use it to set the diagonal of a zeros array to 1: + >>> a = zeros((2,2,2),int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + + [[0, 0], + [0, 1]]]) + + """ + idx = arange(n) + return (idx,) * ndim + + +def diag_indices_from(arr): + """Return the indices to access the main diagonal of an n-dimensional array. + + See diag_indices() for full details. + + Parameters + ---------- + arr : array, at least 2-d + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not alltrue(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/numpy/lib/info.py b/numpy/lib/info.py index f93234d57..4a781a2ca 100644 --- a/numpy/lib/info.py +++ b/numpy/lib/info.py @@ -135,12 +135,11 @@ Set operations for 1D numeric arrays based on sort() function. ================ =================== ediff1d Array difference (auxiliary function). -unique1d Unique elements of 1D array. +unique Unique elements of an array. intersect1d Intersection of 1D arrays with unique elements. -intersect1d_nu Intersection of 1D arrays with any elements. setxor1d Set exclusive-or of 1D arrays with unique elements. -setmember1d Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. +in1d Test whether elements in a 1D array are also present in + another array. union1d Union of 1D arrays with unique elements. setdiff1d Set difference of 1D arrays with unique elements. ================ =================== diff --git a/numpy/lib/io.py b/numpy/lib/io.py index 98d071fab..3a962c7e1 100644 --- a/numpy/lib/io.py +++ b/numpy/lib/io.py @@ -118,6 +118,27 @@ class NpzFile(object): else: raise KeyError, "%s is not a file in the archive" % key + + def __iter__(self): + return iter(self.files) + + def items(self): + return [(f, self[f]) for f in self.files] + + def iteritems(self): + for f in self.files: + yield (f, self[f]) + + def keys(self): + return self.files + + def iterkeys(self): + return self.__iter__() + + def __contains__(self, key): + return self.files.__contains__(key) + + def load(file, mmap_mode=None): """ Load a pickled, ``.npy``, or ``.npz`` binary file. @@ -126,6 +147,7 @@ def load(file, mmap_mode=None): ---------- file : file-like object or string The file to read. It must support ``seek()`` and ``read()`` methods. + If the filename extension is ``.gz``, the file is first decompressed. mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional If not None, then memory-map the file, using the given mode (see `numpy.memmap`). The mode has no effect for pickled or @@ -146,6 +168,11 @@ def load(file, mmap_mode=None): IOError If the input file does not exist or cannot be read. 
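The new ``NpzFile`` mapping methods make an ``.npz`` archive usable like a
read-only dict; a sketch (the file name is arbitrary)::

    import numpy as np

    np.savez('data.npz', x=np.arange(3), y=np.zeros((2, 2)))
    npz = np.load('data.npz')
    print 'x' in npz              # True, via the new __contains__
    for name in npz:              # __iter__ walks the member names
        print name, npz[name].shape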
+ See Also + -------- + save, savez, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + Notes ----- - If the file contains pickle data, then whatever is stored in the @@ -202,20 +229,20 @@ def load(file, mmap_mode=None): def save(file, arr): """ - Save an array to a binary file in NumPy format. + Save an array to a binary file in NumPy ``.npy`` format. Parameters ---------- - f : file or string + file : file or string File or filename to which the data is saved. If the filename does not already have a ``.npy`` extension, it is added. - x : array_like - Array data. + arr : array_like + Array data to be saved. See Also -------- - savez : Save several arrays into an .npz compressed archive - savetxt : Save an array to a file as plain text + savez : Save several arrays into a .npz compressed archive + savetxt, load Examples -------- @@ -225,7 +252,7 @@ def save(file, arr): >>> x = np.arange(10) >>> np.save(outfile, x) - >>> outfile.seek(0) + >>> outfile.seek(0) # only necessary in this example (with tempfile) >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -273,6 +300,12 @@ def savez(file, *args, **kwds): The .npz file format is a zipped archive of files named after the variables they contain. Each file contains one variable in .npy format. + Examples + -------- + >>> x = np.random.random((3, 3)) + >>> y = np.zeros((3, 2)) + >>> np.savez('data', x=x, y=y) + """ # Import is postponed to here since zipfile depends on gzip, an optional @@ -523,20 +556,20 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, def savetxt(fname, X, fmt='%.18e',delimiter=' '): """ - Save an array to file. + Save an array to a text file. Parameters ---------- - fname : filename or a file handle - If the filename ends in .gz, the file is automatically saved in - compressed gzip format. The load() command understands gzipped - files transparently. + fname : filename or file handle + If the filename ends in ``.gz``, the file is automatically saved in + compressed gzip format. `loadtxt` understands gzipped files + transparently. X : array_like - Data. - fmt : string or sequence of strings + Data to be saved to a text file. + fmt : str or sequence of strs A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which - case delimiter is ignored. + case `delimiter` is ignored. delimiter : str Character separating columns. @@ -588,15 +621,20 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '): ``x,X`` : unsigned hexadecimal integer - This is not an exhaustive specification. - + This explanation of ``fmt`` is not complete, for an exhaustive + specification see [1]_. + References + ---------- + .. [1] `Format Specification Mini-Language + <http://docs.python.org/library/string.html# + format-specification-mini-language>`_, Python Documentation. Examples -------- - >>> savetxt('test.out', x, delimiter=',') # X is an array - >>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays - >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation + >>> savetxt('test.out', x, delimiter=',') # X is an array + >>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays + >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ @@ -712,15 +750,13 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, Each line past the first `skiprows` ones is split at the `delimiter` character, and characters following the `comments` character are discarded. 
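A minimal ``genfromtxt`` sketch showing the comment stripping just described
(input is inlined via StringIO; the values are illustrative)::

    from StringIO import StringIO
    import numpy as np

    data = StringIO("1 2 3\n4 5 6 # trailing comment\n")
    print np.genfromtxt(data, comments='#')
    # [[ 1.  2.  3.]
    #  [ 4.  5.  6.]]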
- - Parameters ---------- - fname : file or string - File or filename to read. If the filename extension is `.gz` or `.bz2`, - the file is first decompressed. - dtype : data-type + fname : {file, string} + File or filename to read. If the filename extension is `.gz` or + `.bz2`, the file is first decompressed. + dtype : dtype Data type of the resulting array. If this is a flexible data-type, the resulting array will be 1-dimensional, and each row will be interpreted as an element of the array. In this case, the number @@ -729,20 +765,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, of the dtype. If None, the dtypes will be determined by the contents of each column, individually. - comments : {string}, optional + comments : string, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded - delimiter : {string}, optional + delimiter : string, optional The string used to separate values. By default, any consecutive whitespace act as delimiter. - skiprows : {int}, optional + skiprows : int, optional Numbers of lines to skip at the beginning of the file. converters : {None, dictionary}, optional A dictionary mapping column number to a function that will convert values in the column to a number. Converters can also be used to provide a default value for missing data: ``converters = {3: lambda s: float(s or 0)}``. - missing : {string}, optional + missing : string, optional A string representing a missing value, irrespective of the column where it appears (e.g., `'missing'` or `'unused'`). missing_values : {None, dictionary}, optional @@ -757,20 +793,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a flexible dtype. If `names` is None, the names of the dtype fields will be used, if any. - excludelist : {sequence}, optional + excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: for example, `file` would become `file_`. - deletechars : {string}, optional - A string combining invalid characters that must be deleted from the names. + deletechars : string, optional + A string combining invalid characters that must be deleted from the + names. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case_sensitive. If False or 'upper', field names are converted to upper case. If 'lower', field names are converted to lower case. - unpack : {bool}, optional + unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)`` - usemask : {bool}, optional + usemask : bool, optional If True, returns a masked array. If False, return a regular standard array. @@ -779,23 +816,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, out : MaskedArray Data read from the text file. - Notes + See Also -------- + numpy.loadtxt : equivalent function when no data is missing. + + Notes + ----- * When spaces are used as delimiters, or when no delimiter has been given as input, there should not be any missing data between two fields. * When the variable are named (either by a flexible dtype or with `names`, - there must not be any header in the file (else a :exc:ValueError exception - is raised). 
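Following the ``converters`` description above, a sketch that supplies a
default for a missing field (the -999 sentinel is arbitrary)::

    from StringIO import StringIO
    import numpy as np

    data = StringIO("1,2,3\n4,,6\n")
    conv = {1: lambda s: float(s or -999)}   # default for the empty field
    print np.genfromtxt(data, delimiter=',', converters=conv)
    # [[   1.    2.    3.]
    #  [   4. -999.    6.]]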
- - Warnings - -------- + there must not be any header in the file (else a :exc:ValueError + exception is raised). * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. - See Also - -------- - numpy.loadtxt : equivalent function when no data is missing. - """ # if usemask: @@ -1128,20 +1162,21 @@ def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0, excludelist=None, deletechars=None, case_sensitive=True, usemask=False): """ - Load ASCII data stored in fname and returns a standard recarray (if + Load ASCII data stored in fname and returns a standard recarray (if `usemask=False`) or a MaskedRecords (if `usemask=True`). - + Complete description of all the optional input parameters is available in the docstring of the `genfromtxt` function. - + See Also -------- numpy.genfromtxt : generic function - Warnings - -------- + Notes + ----- * by default, `dtype=None`, which means that the dtype of the output array will be determined from the data. + """ kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, skiprows=skiprows, converters=converters, diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index 269d332bf..0e1bafa91 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -166,7 +166,8 @@ def _fix_real_abs_gt_1(x): return x def sqrt(x): - """Return the square root of x. + """ + Return the square root of x. Parameters ---------- @@ -174,12 +175,29 @@ def sqrt(x): Returns ------- - array_like output. + out : array_like + + Notes + ----- + + As the numpy.sqrt, this returns the principal square root of x, which is + what most people mean when they use square root; the principal square root + of x is not any number z such as z^2 = x. + + For positive numbers, the principal square root is defined as the positive + number z such as z^2 = x. + + The principal square root of -1 is i, the principal square root of any + negative number -x is defined a i * sqrt(x). For any non zero complex + number, it is defined by using the following branch cut: x = r e^(i t) with + r > 0 and -pi < t <= pi. The principal square root is then + sqrt(r) e^(i t/2). Examples -------- For real, non-negative inputs this works just like numpy.sqrt(): + >>> np.lib.scimath.sqrt(1) 1.0 @@ -187,33 +205,20 @@ def sqrt(x): array([ 1., 2.]) But it automatically handles negative inputs: + >>> np.lib.scimath.sqrt(-1) (0.0+1.0j) >>> np.lib.scimath.sqrt([-1,4]) array([ 0.+1.j, 2.+0.j]) - Notes - ----- - - As the numpy.sqrt, this returns the principal square root of x, which is - what most people mean when they use square root; the principal square root - of x is not any number z such as z^2 = x. - - For positive numbers, the principal square root is defined as the positive - number z such as z^2 = x. - - The principal square root of -1 is i, the principal square root of any - negative number -x is defined a i * sqrt(x). For any non zero complex - number, it is defined by using the following branch cut: x = r e^(i t) with - r > 0 and -pi < t <= pi. The principal square root is then - sqrt(r) e^(i t/2). """ x = _fix_real_lt_zero(x) return nx.sqrt(x) def log(x): - """Return the natural logarithm of x. + """ + Return the natural logarithm of x. If x contains negative inputs, the answer is computed and returned in the complex domain. 
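The real-vs-complex contrast that distinguishes ``numpy.lib.scimath`` from
the top-level functions, in two lines (a sketch)::

    import numpy as np

    print np.sqrt(-1.0)              # nan -- plain sqrt stays in the reals
    print np.lib.scimath.sqrt(-1.0)  # 1j -- scimath gives the principal root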
@@ -224,7 +229,7 @@ def log(x): Returns ------- - array_like + out : array_like Examples -------- @@ -237,12 +242,14 @@ def log(x): >>> np.lib.scimath.log(-math.exp(1)) == (1+1j*math.pi) True + """ x = _fix_real_lt_zero(x) return nx.log(x) def log10(x): - """Return the base 10 logarithm of x. + """ + Return the base 10 logarithm of x. If x contains negative inputs, the answer is computed and returned in the complex domain. @@ -253,12 +260,13 @@ def log10(x): Returns ------- - array_like + out : array_like Examples -------- (We set the printing precision so the example can be auto-tested) + >>> np.set_printoptions(precision=4) >>> np.lib.scimath.log10([10**1,10**2]) @@ -267,12 +275,14 @@ def log10(x): >>> np.lib.scimath.log10([-10**1,-10**2,10**2]) array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + """ x = _fix_real_lt_zero(x) return nx.log10(x) def logn(n, x): - """Take log base n of x. + """ + Take log base n of x. If x contains negative inputs, the answer is computed and returned in the complex domain. @@ -283,12 +293,13 @@ def logn(n, x): Returns ------- - array_like + out : array_like Examples -------- (We set the printing precision so the example can be auto-tested) + >>> np.set_printoptions(precision=4) >>> np.lib.scimath.logn(2,[4,8]) @@ -296,13 +307,15 @@ def logn(n, x): >>> np.lib.scimath.logn(2,[-4,-8,8]) array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + """ x = _fix_real_lt_zero(x) n = _fix_real_lt_zero(n) return nx.log(x)/nx.log(n) def log2(x): - """ Take log base 2 of x. + """ + Take log base 2 of x. If x contains negative inputs, the answer is computed and returned in the complex domain. @@ -313,12 +326,13 @@ def log2(x): Returns ------- - array_like + out : array_like Examples -------- (We set the printing precision so the example can be auto-tested) + >>> np.set_printoptions(precision=4) >>> np.lib.scimath.log2([4,8]) @@ -326,12 +340,14 @@ def log2(x): >>> np.lib.scimath.log2([-4,-8,8]) array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + """ x = _fix_real_lt_zero(x) return nx.log2(x) def power(x, p): - """Return x**p. + """ + Return x**p. If x contains negative values, it is converted to the complex domain. @@ -344,11 +360,12 @@ def power(x, p): Returns ------- - array_like + out : array_like Examples -------- (We set the printing precision so the example can be auto-tested) + >>> np.set_printoptions(precision=4) >>> np.lib.scimath.power([2,4],2) @@ -359,6 +376,7 @@ def power(x, p): >>> np.lib.scimath.power([-2,4],2) array([ 4.+0.j, 16.+0.j]) + """ x = _fix_real_lt_zero(x) p = _fix_int_lt_zero(p) @@ -393,7 +411,8 @@ def arccos(x): return nx.arccos(x) def arcsin(x): - """Compute the inverse sine of x. + """ + Compute the inverse sine of x. For real x with abs(x)<=1, this returns the principal value. @@ -410,6 +429,7 @@ def arcsin(x): Examples -------- (We set the printing precision so the example can be auto-tested) + >>> np.set_printoptions(precision=4) >>> np.lib.scimath.arcsin(0) @@ -417,12 +437,14 @@ def arcsin(x): >>> np.lib.scimath.arcsin([0,1]) array([ 0. , 1.5708]) + """ x = _fix_real_abs_gt_1(x) return nx.arcsin(x) def arctanh(x): - """Compute the inverse hyperbolic tangent of x. + """ + Compute the inverse hyperbolic tangent of x. For real x with abs(x)<=1, this returns the principal value. 
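``logn`` above is just the change-of-base identity plus the scimath domain
fix; a quick sketch checking the two against each other::

    import numpy as np

    print np.lib.scimath.logn(2, [4, 8])    # [ 2.  3.]
    print np.log([4.0, 8.0]) / np.log(2.0)  # identical: log(x)/log(n)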
@@ -434,7 +456,7 @@ def arctanh(x): Returns ------- - array_like + out : array_like Examples -------- @@ -446,6 +468,7 @@ def arctanh(x): >>> np.lib.scimath.arctanh([0,2]) array([ 0.0000+0.j , 0.5493-1.5708j]) + """ x = _fix_real_abs_gt_1(x) return nx.arctanh(x) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index 19dd54f7a..a5bf4d0ea 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -892,6 +892,19 @@ def dsplit(ary,indices_or_sections): raise ValueError, 'vsplit only works on arrays of 3 or more dimensions' return split(ary,indices_or_sections,2) +def get_array_prepare(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = [(getattr(x, '__array_priority__', 0), -i, + x.__array_prepare__) for i, x in enumerate(args) + if hasattr(x, '__array_prepare__')] + wrappers.sort() + if wrappers: + return wrappers[-1][-1] + return None + def get_array_wrap(*args): """Find the wrapper for the array with the highest priority. @@ -975,7 +988,6 @@ def kron(a,b): True """ - wrapper = get_array_wrap(a, b) b = asanyarray(b) a = array(a,copy=False,subok=True,ndmin=b.ndim) ndb, nda = b.ndim, a.ndim @@ -998,6 +1010,10 @@ def kron(a,b): axis = nd-1 for _ in xrange(nd): result = concatenate(result, axis=axis) + wrapper = get_array_prepare(a, b) + if wrapper is not None: + result = wrapper(result) + wrapper = get_array_wrap(a, b) if wrapper is not None: result = wrapper(result) return result @@ -1007,6 +1023,19 @@ def tile(A, reps): """ Construct an array by repeating A the number of times given by reps. + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + Parameters ---------- A : array_like @@ -1017,24 +1046,11 @@ def tile(A, reps): Returns ------- c : ndarray - The output array. + The tiled output array. See Also -------- - repeat - - Notes - ----- - If `reps` has length d, the result will have dimension of max(d, `A`.ndim). - - If `A`.ndim < d, `A` is promoted to be d-dimensional by prepending new - axes. So a shape (3,) array is promoted to (1,3) for 2-D replication, - or shape (1,1,3) for 3-D replication. If this is not the desired behavior, - promote `A` to d-dimensions manually before calling this function. - - If `A`.ndim > d, `reps` is promoted to `A`.ndim by pre-pending 1's to it. - Thus for an `A` of shape (2,3,4,5), a `reps` of (2,2) is treated as - (1,1,2,2). + repeat : Repeat elements of an array. 
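The two promotion rules described in the rewritten tile docstring above can be confirmed from shapes alone. A quick sketch (assuming `np` is an imported numpy):

>>> import numpy as np
>>> np.tile(np.zeros(3), (2, 2)).shape               # (3,) is promoted to (1, 3)
(2, 6)
>>> np.tile(np.zeros((2, 3, 4, 5)), (2, 2)).shape    # reps is treated as (1, 1, 2, 2)
(2, 3, 8, 10)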
Examples -------- @@ -1046,7 +1062,6 @@ def tile(A, reps): [0, 1, 2, 0, 1, 2]]) >>> np.tile(a, (2, 1, 2)) array([[[0, 1, 2, 0, 1, 2]], - <BLANKLINE> [[0, 1, 2, 0, 1, 2]]]) >>> b = np.array([[1, 2], [3, 4]]) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 40bc11f6e..92305129a 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -9,39 +9,61 @@ from numpy.lib.arraysetops import * import warnings class TestAso(TestCase): - def test_unique1d( self ): + def test_unique( self ): a = np.array( [5, 7, 1, 2, 1, 5, 7] ) ec = np.array( [1, 2, 5, 7] ) - c = unique1d( a ) + c = unique( a ) assert_array_equal( c, ec ) warnings.simplefilter('ignore', Warning) - unique, indices = unique1d( a, return_index=True ) + vals, indices = unique( a, return_index=True ) warnings.resetwarnings() ed = np.array( [2, 3, 0, 1] ) - assert_array_equal(unique, ec) + assert_array_equal(vals, ec) assert_array_equal(indices, ed) - assert_array_equal([], unique1d([])) + warnings.simplefilter('ignore', Warning) + vals, ind0, ind1 = unique( a, return_index=True, + return_inverse=True ) + warnings.resetwarnings() + + ee = np.array( [2, 3, 0, 1, 0, 2, 3] ) + assert_array_equal(vals, ec) + assert_array_equal(ind0, ed) + assert_array_equal(ind1, ee) + + assert_array_equal([], unique([])) def test_intersect1d( self ): + # unique inputs a = np.array( [5, 7, 1, 2] ) b = np.array( [2, 4, 3, 1, 5] ) ec = np.array( [1, 2, 5] ) - c = intersect1d( a, b ) + c = intersect1d( a, b, assume_unique=True ) assert_array_equal( c, ec ) + # non-unique inputs + a = np.array( [5, 5, 7, 1, 2] ) + b = np.array( [2, 1, 4, 3, 3, 1, 5] ) + + ed = np.array( [1, 2, 5] ) + c = intersect1d( a, b ) + assert_array_equal( c, ed ) + assert_array_equal([], intersect1d([],[])) def test_intersect1d_nu( self ): + # This should be removed when intersect1d_nu is removed. a = np.array( [5, 5, 7, 1, 2] ) b = np.array( [2, 1, 4, 3, 3, 1, 5] ) ec = np.array( [1, 2, 5] ) + warnings.simplefilter('ignore', Warning) c = intersect1d_nu( a, b ) + warnings.resetwarnings() assert_array_equal( c, ec ) assert_array_equal([], intersect1d_nu([],[])) @@ -83,11 +105,14 @@ class TestAso(TestCase): assert_array_equal([1],ediff1d(two_elem)) def test_setmember1d( self ): + # This should be removed when setmember1d is removed. 
a = np.array( [5, 7, 1, 2] ) b = np.array( [2, 4, 3, 1, 5] ) ec = np.array( [True, False, True, True] ) + warnings.simplefilter('ignore', Warning) c = setmember1d( a, b ) + warnings.resetwarnings() assert_array_equal( c, ec ) a[0] = 8 @@ -102,51 +127,77 @@ class TestAso(TestCase): assert_array_equal([], setmember1d([],[])) - def test_setmember1d_nu(self): + def test_in1d(self): + a = np.array( [5, 7, 1, 2] ) + b = np.array( [2, 4, 3, 1, 5] ) + + ec = np.array( [True, False, True, True] ) + c = in1d( a, b, assume_unique=True ) + assert_array_equal( c, ec ) + + a[0] = 8 + ec = np.array( [False, False, True, True] ) + c = in1d( a, b, assume_unique=True ) + assert_array_equal( c, ec ) + + a[0], a[3] = 4, 8 + ec = np.array( [True, False, True, False] ) + c = in1d( a, b, assume_unique=True ) + assert_array_equal( c, ec ) + a = np.array([5,4,5,3,4,4,3,4,3,5,2,1,5,5]) b = [2,3,4] ec = [False, True, False, True, True, True, True, True, True, False, True, False, False, False] - c = setmember1d_nu(a, b) + c = in1d(a, b) assert_array_equal(c, ec) b = b + [5, 5, 4] ec = [True, True, True, True, True, True, True, True, True, True, True, False, True, True] - c = setmember1d_nu(a, b) + c = in1d(a, b) assert_array_equal(c, ec) a = np.array([5, 7, 1, 2]) b = np.array([2, 4, 3, 1, 5]) ec = np.array([True, False, True, True]) - c = setmember1d_nu(a, b) + c = in1d(a, b) assert_array_equal(c, ec) a = np.array([5, 7, 1, 1, 2]) b = np.array([2, 4, 3, 3, 1, 5]) ec = np.array([True, False, True, True, True]) - c = setmember1d_nu(a, b) + c = in1d(a, b) assert_array_equal(c, ec) a = np.array([5]) b = np.array([2]) ec = np.array([False]) - c = setmember1d_nu(a, b) + c = in1d(a, b) assert_array_equal(c, ec) a = np.array([5, 5]) b = np.array([2, 2]) ec = np.array([False, False]) - c = setmember1d_nu(a, b) + c = in1d(a, b) assert_array_equal(c, ec) - assert_array_equal(setmember1d_nu([], []), []) + assert_array_equal(in1d([], []), []) + + def test_in1d_char_array( self ): + a = np.array(['a', 'b', 'c','d','e','c','e','b']) + b = np.array(['a','c']) + + ec = np.array([True, False, True, False, False, True, False, False]) + c = in1d(a, b) + + assert_array_equal(c, ec) def test_union1d( self ): a = np.array( [5, 4, 7, 1, 2] ) @@ -159,7 +210,7 @@ class TestAso(TestCase): assert_array_equal([], union1d([],[])) def test_setdiff1d( self ): - a = np.array( [6, 5, 4, 7, 1, 2] ) + a = np.array( [6, 5, 4, 7, 1, 2, 7, 4] ) b = np.array( [2, 4, 3, 3, 2, 1, 5] ) ec = np.array( [6, 7] ) @@ -180,14 +231,6 @@ class TestAso(TestCase): assert_array_equal(setdiff1d(a,b),np.array(['c'])) def test_manyways( self ): - nItem = 100 - a = np.fix( nItem / 10 * np.random.random( nItem ) ) - b = np.fix( nItem / 10 * np.random.random( nItem ) ) - - c1 = intersect1d_nu( a, b ) - c2 = unique1d( intersect1d( a, b ) ) - assert_array_equal( c1, c2 ) - a = np.array( [5, 7, 1, 2, 8] ) b = np.array( [9, 8, 2, 4, 3, 1, 5] ) diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py index 1ac14b561..c1d77c517 100644 --- a/numpy/lib/tests/test_financial.py +++ b/numpy/lib/tests/test_financial.py @@ -36,13 +36,18 @@ class TestFinancial(TestCase): 117.04, 2) def test_mirr(self): - v1 = [-4500,-800,800,800,600,600,800,800,700,3000] - assert_almost_equal(np.mirr(v1,0.08,0.055), - 0.0665, 4) + val = [-4500,-800,800,800,600,600,800,800,700,3000] + assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) + + val = [-120000,39000,30000,21000,37000,46000] + assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) + + val = 
[100,200,-50,300,-200] + assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) + + val = [39000,30000,21000,37000,46000] + assert_(np.isnan(np.mirr(val, 0.10, 0.12))) - v2 = [-120000,39000,30000,21000,37000,46000] - assert_almost_equal(np.mirr(v2,0.10,0.12), - 0.1344, 4) def test_unimplemented(): diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index 47529502d..d7e61799a 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -1,5 +1,8 @@ from numpy.testing import * -from numpy import array, ones, r_, mgrid, unravel_index +import numpy as np +from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where, + ndenumerate, fill_diagonal, diag_indices, + diag_indices_from ) class TestUnravelIndex(TestCase): def test_basic(self): @@ -62,5 +65,60 @@ class TestConcatenator(TestCase): assert_array_equal(d[5:,:],c) +class TestNdenumerate(TestCase): + def test_basic(self): + a = array([[1,2], [3,4]]) + assert_equal(list(ndenumerate(a)), + [((0,0), 1), ((0,1), 2), ((1,0), 3), ((1,1), 4)]) + + +def test_fill_diagonal(): + a = zeros((3, 3),int) + fill_diagonal(a, 5) + yield (assert_array_equal, a, + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]])) + + # The same function can operate on a 4-d array: + a = zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = array([0, 1, 2]) + yield (assert_equal, where(a != 0), (i, i, i, i)) + + +def test_diag_indices(): + di = diag_indices(4) + a = array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + yield (assert_array_equal, a, + array([[100, 2, 3, 4], + [ 5, 100, 7, 8], + [ 9, 10, 100, 12], + [ 13, 14, 15, 100]])) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = zeros((2, 2, 2),int) + a[d3] = 1 + yield (assert_array_equal, a, + array([[[1, 0], + [0, 0]], + + [[0, 0], + [0, 1]]]) ) + +def test_diag_indices_from(): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index e5a73a86a..185ceef7c 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -916,5 +916,31 @@ def test_gzip_loadtxt_from_string(): f = gzip.GzipFile(fileobj=s, mode="r") assert_array_equal(np.loadtxt(f), [1, 2, 3]) +def test_npzfile_dict(): + s = StringIO.StringIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert 'x' in z + assert 'y' in z + assert 'x' in z.keys() + assert 'y' in z.keys() + + for f, a in z.iteritems(): + assert f in ['x', 'y'] + assert_equal(a.shape, (3, 3)) + + assert len(z.items()) == 2 + + for f in z: + assert f in ['x', 'y'] + + assert 'x' in list(z.iterkeys()) + if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index b8c487962..5abf9aefe 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -48,6 +48,10 @@ class TestRegression(object): """Ticket 928.""" assert_raises(ValueError, np.histogramdd, np.ones((1,10)), bins=2**10) + def test_ndenumerate_crash(self): + """Ticket 1140""" + # Shouldn't crash: + list(np.ndenumerate(np.array([[]]))) if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/tests/test_twodim_base.py 
b/numpy/lib/tests/test_twodim_base.py index 32c4ca58e..5d850f9fd 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -3,8 +3,11 @@ """ from numpy.testing import * -from numpy import arange, rot90, add, fliplr, flipud, zeros, ones, eye, \ - array, diag, histogram2d, tri + +from numpy import ( arange, rot90, add, fliplr, flipud, zeros, ones, eye, + array, diag, histogram2d, tri, mask_indices, triu_indices, + triu_indices_from, tril_indices, tril_indices_from ) + import numpy as np def get_mat(n): @@ -50,34 +53,68 @@ class TestEye(TestCase): [1,0,0], [0,1,0]])) + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), [['1', ''], ['', '1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + class TestDiag(TestCase): def test_vector(self): - vals = (100*arange(5)).astype('l') - b = zeros((5,5)) + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) for k in range(5): - b[k,k] = vals[k] - assert_equal(diag(vals),b) - b = zeros((7,7)) + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 7)) c = b.copy() for k in range(5): - b[k,k+2] = vals[k] - c[k+2,k] = vals[k] - assert_equal(diag(vals,k=2), b) - assert_equal(diag(vals,k=-2), c) + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) - def test_matrix(self): - vals = (100*get_mat(5)+1).astype('l') + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') b = zeros((5,)) for k in range(5): b[k] = vals[k,k] - assert_equal(diag(vals),b) - b = b*0 + assert_equal(diag(vals), b) + b = b * 0 for k in range(3): - b[k] = vals[k,k+2] - assert_equal(diag(vals,2),b[:3]) + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) for k in range(3): - b[k] = vals[k+2,k] - assert_equal(diag(vals,-2),b[:3]) + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + self.failUnlessRaises(ValueError, diag, [[[1]]]) class TestFliplr(TestCase): def test_basic(self): @@ -193,5 +230,76 @@ class TestTri(TestCase): assert_array_equal(tri(3,dtype=bool),out.astype(bool)) +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + yield (assert_array_equal, a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, 2) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + 
[13, 14, 15, 16]]) + + # indexing: + yield (assert_array_equal, a[il1], + array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16]) ) + + # And for assigning values: + a[il1] = -1 + yield (assert_array_equal, a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]]) ) + + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + yield (assert_array_equal, a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) ) + + +def test_triu_indices(): + iu1 = triu_indices(4) + iu2 = triu_indices(4, 2) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + + # Both for indexing: + yield (assert_array_equal, a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + + # And for assigning values: + a[iu1] = -1 + yield (assert_array_equal, a, + array([[-1, -1, -1, -1], + [ 5, -1, -1, -1], + [ 9, 10, -1, -1], + [13, 14, 15, -1]]) ) + + # These cover almost the whole array (two diagonals right of the main one): + a[iu2] = -10 + yield ( assert_array_equal, a, + array([[ -1, -1, -10, -10], + [ 5, -1, -1, -10], + [ 9, 10, -1, -1], + [ 13, 14, 15, -1]]) ) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index f0abf3122..e794d4144 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -3,10 +3,13 @@ """ __all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu', - 'tril','vander','histogram2d'] + 'tril','vander','histogram2d','mask_indices', + 'tril_indices','tril_indices_from','triu_indices','triu_indices_from', + ] from numpy.core.numeric import asanyarray, equal, subtract, arange, \ - zeros, greater_equal, multiply, ones, asarray + zeros, greater_equal, multiply, ones, asarray, alltrue, where, \ + empty def fliplr(m): """ @@ -195,10 +198,16 @@ def eye(N, M=None, k=0, dtype=float): [ 0., 0., 0.]]) """ - if M is None: M = N - m = equal(subtract.outer(arange(N), arange(M)),-k) - if m.dtype != dtype: - m = m.astype(dtype) + if M is None: + M = N + m = zeros((N, M), dtype=dtype) + if k >= M: + return m + if k >= 0: + i = k + else: + i = (-k) * M + m[:M-k].flat[i::M+1] = 1 return m def diag(v, k=0): @@ -244,28 +253,26 @@ def diag(v, k=0): """ v = asarray(v) s = v.shape - if len(s)==1: + if len(s) == 1: n = s[0]+abs(k) res = zeros((n,n), v.dtype) - if (k>=0): - i = arange(0,n-k) - fi = i+k+i*n + if k >= 0: + i = k else: - i = arange(0,n+k) - fi = i+(i-k)*n - res.flat[fi] = v + i = (-k) * n + res[:n-k].flat[i::n+1] = v return res - elif len(s)==2: - N1,N2 = s + elif len(s) == 2: + if k >= s[1]: + return empty(0, dtype=v.dtype) + if v.flags.f_contiguous: + # faster slicing + v, k, s = v.T, -k, s[::-1] if k >= 0: - M = min(N1,N2-k) - i = arange(0,M) - fi = i+k+i*N2 + i = k else: - M = min(N1+k,N2) - i = arange(0,M) - fi = i + (i-k)*N2 - return v.flat[fi] + i = (-k) * s[1] + return v[:s[1]-k].flat[i::s[1]+1] else: raise ValueError, "Input must be 1- or 2-d." @@ -559,3 +566,233 @@ def histogram2d(x,y, bins=10, range=None, normed=False, weights=None): bins = [xedges, yedges] hist, edges = histogramdd([x,y], bins, range, normed, weights) return hist, edges[0], edges[1] + + +def mask_indices(n,mask_func,k=0): + """Return the indices to access (n,n) arrays, given a masking function. 
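The eye() and diag() rewrites earlier in this hunk rely on the same layout fact: in a C-ordered array with M columns, consecutive elements of the k-th diagonal sit M+1 positions apart in flat memory, starting at flat offset k for k >= 0 and at -k*M otherwise, with the row slice m[:M-k] preventing the stride from wrapping onto the wrong diagonal. A stripped-down sketch of the trick (fill_diag is a hypothetical helper and omits the out-of-range guards the real code adds):

>>> import numpy as np
>>> def fill_diag(m, k, val):
...     ncols = m.shape[1]
...     start = k if k >= 0 else -k * ncols   # flat offset of the diagonal's first element
...     m[:ncols - k].flat[start::ncols + 1] = val   # step ncols+1 walks the diagonal
...     return m
...
>>> fill_diag(np.zeros((3, 4), dtype=int), 1, 7)
array([[0, 7, 0, 0],
       [0, 0, 7, 0],
       [0, 0, 0, 7]])
>>> fill_diag(np.zeros((3, 4), dtype=int), -1, 7)
array([[0, 0, 0, 0],
       [7, 0, 0, 0],
       [0, 7, 0, 0]])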
+ + Assume mask_func() is a function that, for a square array a of size (n,n) + with a possible offset argument k, when called as mask_func(a,k) returns a + new array with zeros in certain locations (functions like triu() or tril() + do precisely this). Then this function returns the indices where the + non-zero values would be located. + + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n,n). + + mask_func : callable + A function whose API is similar to that of numpy.tri{u,l}. That is, + mask_func(x,k) returns a boolean array, shaped like x. k is an optional + argument to the function. + + k : scalar + An optional argument which is passed through to mask_func(). Functions + like tri{u,l} take a second argument that is interpreted as an offset. + + Returns + ------- + indices : an n-tuple of index arrays. + The indices corresponding to the locations where mask_func(ones((n,n)),k) + is True. + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + >>> iu = mask_indices(3,np.triu) + + For example, if `a` is a 3x3 array: + >>> a = np.arange(9).reshape(3,3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + Then: + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can also be passed to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + >>> iu1 = mask_indices(3,np.triu,1) + + with which we now extract only three elements: + >>> a[iu1] + array([1, 2, 5]) + """ + m = ones((n,n),int) + a = mask_func(m,k) + return where(a != 0) + + +def tril_indices(n,k=0): + """Return the indices for the lower-triangle of an (n,n) array. + + Parameters + ---------- + n : int + Sets the size of the arrays for which the returned indices will be valid. + + k : int, optional + Diagonal offset (see tril() for details). + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = tril_indices(4) + >>> il2 = tril_indices(4,2) + + Here is how they can be used with a sample array: + >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]) + >>> a + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12], + [13, 14, 15, 16]]) + + Both for indexing: + >>> a[il1] + array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16]) + + And for assigning values: + >>> a[il1] = -1 + >>> a + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + See also + -------- + - triu_indices : similar function, for upper-triangular. + - mask_indices : generic function accepting an arbitrary mask function. + """ + return mask_indices(n,tril,k) + + +def tril_indices_from(arr,k=0): + """Return the indices for the lower-triangle of an (n,n) array. + + See tril_indices() for full details. + + Parameters + ---------- + arr : ndarray + The indices will be valid for square arrays whose dimensions are + the same as those of arr. + + k : int, optional + Diagonal offset (see tril() for details). + + Notes + ----- + ..
versionadded:: 1.4.0 + + """ + if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): + raise ValueError("input array must be 2-d and square") + return tril_indices(arr.shape[0],k) + + +def triu_indices(n,k=0): + """Return the indices for the upper-triangle of an (n,n) array. + + Parameters + ---------- + n : int + Sets the size of the arrays for which the returned indices will be valid. + + k : int, optional + Diagonal offset (see triu() for details). + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = triu_indices(4) + >>> iu2 = triu_indices(4,2) + + Here is how they can be used with a sample array: + >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]) + >>> a + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12], + [13, 14, 15, 16]]) + + Both for indexing: + >>> a[iu1] + array([ 1, 2, 3, 4, 6, 7, 8, 11, 12, 16]) + + And for assigning values: + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 5, -1, -1, -1], + [ 9, 10, -1, -1], + [13, 14, 15, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 5, -1, -1, -10], + [ 9, 10, -1, -1], + [ 13, 14, 15, -1]]) + + See also + -------- + - tril_indices : similar function, for lower-triangular. + - mask_indices : generic function accepting an arbitrary mask function. + """ + return mask_indices(n,triu,k) + + +def triu_indices_from(arr,k=0): + """Return the indices for the upper-triangle of an (n,n) array. + + See triu_indices() for full details. + + Parameters + ---------- + arr : ndarray + The indices will be valid for square arrays whose dimensions are + the same as those of arr. + + k : int, optional + Diagonal offset (see triu() for details). + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): + raise ValueError("input array must be 2-d and square") + return triu_indices(arr.shape[0],k) + diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 113cec682..69f4f2193 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -85,8 +85,8 @@ def real(val): Returns ------- out : ndarray - If `val` is real, the type of `val` is used for the output. If `val` - has complex elements, the returned type is float. + Output array. If `val` is real, the type of `val` is used for the + output. If `val` has complex elements, the returned type is float. See Also -------- @@ -94,13 +94,13 @@ def real(val): Examples -------- - >>> a = np.array([1+2j,3+4j,5+6j]) + >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real array([ 1., 3., 5.]) >>> a.real = 9 >>> a array([ 9.+2.j, 9.+4.j, 9.+6.j]) - >>> a.real = np.array([9,8,7]) + >>> a.real = np.array([9, 8, 7]) >>> a array([ 9.+2.j, 8.+4.j, 7.+6.j]) @@ -109,7 +109,7 @@ def real(val): def imag(val): """ - Return the imaginary part of array. + Return the imaginary part of the elements of the array. Parameters ---------- @@ -118,8 +118,22 @@ def imag(val): Returns ------- - out : ndarray, real or int - Real part of each element, same shape as `val`. + out : ndarray + Output array. If `val` is real, the type of `val` is used for the + output. If `val` has complex elements, the returned type is float.
+ + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([ 2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([ 1. +8.j, 3.+10.j, 5.+12.j]) """ return asanyarray(val).imag diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py index 5dbc3f225..5e89b0930 100644 --- a/numpy/lib/ufunclike.py +++ b/numpy/lib/ufunclike.py @@ -176,7 +176,7 @@ def isneginf(x, y=None): _log2 = nx.log(2) def log2(x, y=None): """ - Return the base 2 logarithm. + Return the base 2 logarithm of the input array, element-wise. Parameters ---------- @@ -188,7 +188,7 @@ def log2(x, y=None): Returns ------- y : ndarray - The logarithm to the base 2 of `x` elementwise. + The logarithm to the base 2 of `x` element-wise. NaNs are returned where `x` is negative. See Also @@ -197,7 +197,7 @@ def log2(x, y=None): Examples -------- - >>> np.log2([-1,2,4]) + >>> np.log2([-1, 2, 4]) array([ NaN, 1., 2.]) """ diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 3de0579df..908c4995d 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -81,12 +81,34 @@ else: return func def deprecate(func, oldname=None, newname=None): - """Deprecate old functions. + """ + Deprecate old functions. + Issues a DeprecationWarning, adds warning to oldname's docstring, rebinds oldname.__name__ and returns new function object. - Example: - oldfunc = deprecate(newfunc, 'oldfunc', 'newfunc') + Parameters + ---------- + func : function + + oldname : string + + newname : string + + Returns + ------- + old_func : function + + Examples + -------- + Note that olduint returns a value after printing Deprecation Warning. + + >>> olduint = np.deprecate(np.uint) + >>> olduint(6) + /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: + DeprecationWarning: uint32 is deprecated + warnings.warn(str1, DeprecationWarning) + 6 """ @@ -186,13 +208,28 @@ def byte_bounds(a): def may_share_memory(a, b): - """Determine if two arrays can share memory + """ + Determine if two arrays can share memory The memory-bounds of a and b are computed. If they overlap then this function returns True. Otherwise, it returns False. A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. + + Parameters + ---------- + a, b : ndarray + + Returns + ------- + out : bool + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + """ a_low, a_high = byte_bounds(a) b_low, b_high = byte_bounds(b) @@ -349,24 +386,46 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'): Parameters ---------- - object : optional - Input object to get information about. + object : object or str, optional + Input object or name to get information about. If `object` is a + numpy object, its docstring is given. If it is a string, available + modules are searched for matching objects. + If None, information about `info` itself is returned. maxwidth : int, optional Printing width. - output : file like object open for writing, optional - Write into file like object. - toplevel : string, optional + output : file like object, optional + File like object that the output is written to, default is ``stdout``. + The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional Start search at this level. 
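Because may_share_memory, documented above, compares only byte bounds, it errs on the side of True: overlapping bounds are reported even when no element is actually shared. A minimal sketch (assuming `np` is an imported numpy):

>>> import numpy as np
>>> x = np.arange(10)
>>> # interleaved views: their byte bounds overlap, yet no element is shared
>>> np.may_share_memory(x[::2], x[1::2])
True
>>> np.may_share_memory(x[:5], x[5:])
False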
+ See Also + -------- + source, lookfor + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent to + ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt. + Examples -------- >>> np.info(np.polyval) # doctest: +SKIP - polyval(p, x) + Evaluate the polynomial p at x. + ... - Evaluate the polymnomial p at x. - ... + When using a string for `object` it is possible to get multiple results. - ... + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** """ @@ -512,15 +571,39 @@ def source(object, output=sys.stdout): """ Print or write to a file the source code for a Numpy object. + The source code is only returned for objects written in Python. Many + functions and classes are defined in C and will therefore not return + useful information. + Parameters ---------- object : numpy object - Input object. + Input object. This can be any object (function, class, module, ...). output : file object, optional If `output` not supplied then source code is printed to screen (sys.stdout). File object must be created with either write 'w' or append 'a' modes. + See Also + -------- + lookfor, info + + Examples + -------- + >>> np.source(np.interp) + In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py + def interp(x, xp, fp, left=None, right=None): + \"\"\".... (full docstring printed)\"\"\" + if isinstance(x, (float, int, number)): + return compiled_interp([x], xp, fp, left, right).item() + else: + return compiled_interp(x, xp, fp, left, right) + + The source code is only returned for objects written in Python. + + >>> np.source(np.array) + Not available for this object. + """ # Local import to speed up numpy's import time. import inspect @@ -544,28 +627,41 @@ def lookfor(what, module=None, import_modules=True, regenerate=False): Do a keyword search on docstrings. A list of objects that matched the search is displayed, - sorted by relevance. + sorted by relevance. All given keywords need to be found in an + object's docstring for that object to be returned as a result, + but the order does not matter. Parameters ---------- what : str String containing words to look for. - module : str, module - Module whose docstrings to go through. - import_modules : bool + module : str, optional + Name of module whose docstrings to go through. + import_modules : bool, optional Whether to import sub-modules in packages. - Will import only modules in ``__all__``. - regenerate : bool - Whether to re-generate the docstring cache. + Will import only modules in ``__all__``. Default is True. + regenerate : bool, optional + Whether to re-generate the docstring cache. Default is False. - Examples + See Also -------- + source, info + + Notes + ----- + Relevance is determined only roughly, by checking if the keywords occur + in the function name, at the start of a docstring, etc. + Examples + -------- >>> np.lookfor('binary representation') Search results for 'binary representation' ------------------------------------------ numpy.binary_repr Return the binary representation of the input number as a string. + numpy.base_repr + Return a string representation of a number in the given base system. + ...
""" import pydoc diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index dcf7fde26..5878b909f 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -30,7 +30,7 @@ class LinAlgError(Exception): def _makearray(a): new = asarray(a) - wrap = getattr(a, "__array_wrap__", new.__array_wrap__) + wrap = getattr(a, "__array_prepare__", new.__array_wrap__) return new, wrap def isComplexType(t): @@ -1237,7 +1237,7 @@ def lstsq(a, b, rcond=-1): Notes ----- - If `b` is a matrix, then all array results returned as + If `b` is a matrix, then all array results are returned as matrices. Examples diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 5cf11ffb9..3071ff5fe 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -210,7 +210,49 @@ def minimum_fill_value(obj): def maximum_fill_value(obj): """ - Calculate the default fill value suitable for taking the maximum of ``obj``. + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : {ndarray, dtype} + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf """ errmsg = "Unsuitable type for calculating maximum." @@ -452,7 +494,7 @@ def getdata(a, subok=True): Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. subok : bool Whether to force the output to be a `pure` ndarray (False) or to - return a subclass of ndarray if approriate (True, default). + return a subclass of ndarray if appropriate (True, default). See Also -------- @@ -2471,7 +2513,10 @@ class MaskedArray(ndarray): self._mask = _mask # Finalize the mask ........... if self._mask is not nomask: - self._mask.shape = self.shape + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask return #.................................. def __array_wrap__(self, obj, context=None): @@ -3126,6 +3171,8 @@ class MaskedArray(ndarray): #............................................ def __eq__(self, other): "Check whether other equals self elementwise" + if self is masked: + return masked omask = getattr(other, '_mask', nomask) if omask is nomask: check = ndarray.__eq__(self.filled(0), other).view(type(self)) @@ -3152,6 +3199,8 @@ class MaskedArray(ndarray): # def __ne__(self, other): "Check whether other doesn't equal self elementwise" + if self is masked: + return masked omask = getattr(other, '_mask', nomask) if omask is nomask: check = ndarray.__ne__(self.filled(0), other).view(type(self)) @@ -3723,52 +3772,49 @@ class MaskedArray(ndarray): def cumsum(self, axis=None, dtype=None, out=None): """ - Return the cumulative sum of the elements along the given axis. - The cumulative sum is calculated over the flattened array by - default, otherwise over the specified axis. 
+ Return the cumulative sum of the elements along the given axis. + The cumulative sum is calculated over the flattened array by + default, otherwise over the specified axis. - Masked values are set to 0 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. + Masked values are set to 0 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the sum is computed. The default (`axis` = None) is to - compute over the flattened array. `axis` may be negative, in which case - it counts from the last to the first axis. - dtype : {None, dtype}, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Warnings - -------- - The mask is lost if out is not a valid :class:`MaskedArray` ! + Parameters + ---------- + axis : {None, -1, int}, optional + Axis along which the sum is computed. The default (`axis` = None) is to + compute over the flattened array. `axis` may be negative, in which case + it counts from the last to the first axis. + dtype : {None, dtype}, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. - Returns - ------- - cumsum : ndarray. - A new array holding the result is returned unless ``out`` is - specified, in which case a reference to ``out`` is returned. + Returns + ------- + cumsum : ndarray. + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. - Examples - -------- - >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) - >>> print marr.cumsum() - [0 1 3 -- -- -- 9 16 24 33] + Notes + ----- + The mask is lost if `out` is not a valid :class:`MaskedArray` ! + Arithmetic is modular when using integer types, and no error is + raised on overflow. - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. + Examples + -------- + >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) + >>> print marr.cumsum() + [0 1 3 -- -- -- 9 16 24 33] """ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) @@ -3853,46 +3899,44 @@ class MaskedArray(ndarray): def cumprod(self, axis=None, dtype=None, out=None): """ - Return the cumulative product of the elements along the given axis. - The cumulative product is taken over the flattened array by - default, otherwise over the specified axis. + Return the cumulative product of the elements along the given axis. 
+ The cumulative product is taken over the flattened array by + default, otherwise over the specified axis. - Masked values are set to 1 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the product is computed. The default - (`axis` = None) is to compute over the flattened array. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If ``dtype`` has the value ``None`` and - the type of ``a`` is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of ``a``. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Warnings - -------- - The mask is lost if out is not a valid MaskedArray ! + Parameters + ---------- + axis : {None, -1, int}, optional + Axis along which the product is computed. The default + (`axis` = None) is to compute over the flattened array. + dtype : {None, dtype}, optional + Determines the type of the returned array and of the accumulator + where the elements are multiplied. If ``dtype`` has the value ``None`` + and the type of ``a`` is an integer type of precision less than the + default platform integer, then the default platform integer precision + is used. Otherwise, the dtype is the same as that of ``a``. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless out is specified, - in which case a reference to out is returned. + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless out is specified, + in which case a reference to out is returned. - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! - """ + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + """ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): @@ -4545,12 +4589,13 @@ class MaskedArray(ndarray): purposes. """ + cf = 'CF'[self.flags.fnc] state = (1, self.shape, self.dtype, self.flags.fnc, - self._data.tostring(), - getmaskarray(self).tostring(), + self._data.tostring(cf), + getmaskarray(self).tostring(cf), self._fill_value, ) return state @@ -4864,19 +4909,18 @@ class _frommethod: return doc # def __call__(self, a, *args, **params): - if isinstance(a, MaskedArray): - return getattr(a, self.__name__).__call__(*args, **params) - #FIXME ---- - #As x is not a MaskedArray, we transform it to a ndarray with asarray - #... and call the corresponding method. - #Except that sometimes it doesn't work (try reshape([1,2,3,4],(2,2))) - #we end up with a "SystemError: NULL result without error in PyObject_Call" - #A dirty trick is then to call the initial numpy function... 
- method = getattr(narray(a, copy=False), self.__name__) - try: + # Get the method from the array (if possible) + method_name = self.__name__ + method = getattr(a, method_name, None) + if method is not None: return method(*args, **params) - except SystemError: - return getattr(np,self.__name__).__call__(a, *args, **params) + # Still here ? Then a is not a MaskedArray + method = getattr(MaskedArray, method_name, None) + if method is not None: + return method(MaskedArray(a), *args, **params) + # Still here ? OK, let's call the corresponding np function + method = getattr(np, method_name) + return method(a, *args, **params) all = _frommethod('all') anomalies = anom = _frommethod('anom') diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 1aa43a222..9c6b7d66c 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -13,24 +13,25 @@ __date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' __all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', - 'column_stack','compress_cols','compress_rowcols', 'compress_rows', - 'count_masked', 'corrcoef', 'cov', + 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols', + 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef', + 'cov', 'diagflat', 'dot','dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', - 'intersect1d', 'intersect1d_nu', + 'in1d', 'intersect1d', 'intersect1d_nu', 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', 'setdiff1d', 'setmember1d', 'setxor1d', - 'unique1d', 'union1d', + 'unique', 'unique1d', 'union1d', 'vander', 'vstack', ] -from itertools import groupby +import itertools import warnings import core as ma @@ -45,6 +46,8 @@ import numpy.core.umath as umath from numpy.lib.index_tricks import AxisConcatenator from numpy.linalg import lstsq +from numpy.lib.utils import deprecate_with_doc + #............................................................................... def issequence(seq): """Is seq a sequence (ndarray, list or tuple)?""" @@ -56,11 +59,48 @@ def count_masked(arr, axis=None): """ Count the number of masked elements along the given axis. + Parameters ---------- + arr : array_like + An array with (possibly) masked elements. axis : int, optional - Axis along which to count. - If None (default), a flattened version of the array is used. + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(9).reshape((3,3)) + >>> a = ma.array(a) + >>> a[1, 0] = ma.masked + >>> a[1, 2] = ma.masked + >>> a[2, 1] = ma.masked + >>> a + masked_array(data = + [[0 1 2] + [-- 4 --] + [6 -- 8]], + mask = + [[False False False] + [ True False True] + [False True False]], + fill_value=999999) + >>> ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. 
+ + >>> ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> ma.count_masked(a, axis=1) + array([0, 2, 1]) """ m = getmaskarray(arr) @@ -373,7 +413,7 @@ def average(a, axis=None, weights=None, returned=False): else: if weights is None: n = a.filled(0).sum(axis=None) - d = umath.add.reduce((-mask).ravel().astype(int)) + d = float(umath.add.reduce((~mask).ravel())) else: w = array(filled(weights, 0.0), float, mask=mask).ravel() n = add.reduce(a.ravel() * w) @@ -830,7 +870,7 @@ def ediff1d(arr, to_end=None, to_begin=None): return ed -def unique1d(ar1, return_index=False, return_inverse=False): +def unique(ar1, return_index=False, return_inverse=False): """ Finds the unique elements of an array. @@ -840,11 +880,11 @@ def unique1d(ar1, return_index=False, return_inverse=False): See Also -------- - np.unique1d : equivalent function for ndarrays. + np.unique : equivalent function for ndarrays. """ - output = np.unique1d(ar1, - return_index=return_index, - return_inverse=return_inverse) + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) if isinstance(output, tuple): output = list(output) output[0] = output[0].view(MaskedArray) @@ -854,33 +894,7 @@ def unique1d(ar1, return_index=False, return_inverse=False): return output -def intersect1d(ar1, ar2): - """ - Returns the repeated or unique elements belonging to the two arrays. - - Masked values are assumed equals one to the other. - The output is always a masked array - - See Also - -------- - numpy.intersect1d : equivalent function for ndarrays. - - Examples - -------- - >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - >>> intersect1d(x, y) - masked_array(data = [1 1 3 3 --], - mask = [False False False False True], - fill_value = 999999) - """ - aux = ma.concatenate((ar1,ar2)) - aux.sort() - return aux[aux[1:] == aux[:-1]] - - - -def intersect1d_nu(ar1, ar2): +def intersect1d(ar1, ar2, assume_unique=False): """ Returns the unique elements common to both arrays. @@ -889,27 +903,28 @@ def intersect1d_nu(ar1, ar2): See Also -------- - intersect1d : Returns repeated or unique common elements. - numpy.intersect1d_nu : equivalent function for ndarrays. + numpy.intersect1d : equivalent function for ndarrays. Examples -------- >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - >>> intersect1d_nu(x, y) + >>> intersect1d(x, y) masked_array(data = [1 3 --], mask = [False False True], fill_value = 999999) """ - # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? - aux = ma.concatenate((unique1d(ar1), unique1d(ar2))) + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) aux.sort() return aux[aux[1:] == aux[:-1]] - -def setxor1d(ar1, ar2): +def setxor1d(ar1, ar2, assume_unique=False): """ Set exclusive-or of 1D arrays with unique elements. @@ -918,6 +933,10 @@ def setxor1d(ar1, ar2): numpy.setxor1d : equivalent function for ndarrays """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + aux = ma.concatenate((ar1, ar2)) if aux.size == 0: return aux @@ -929,54 +948,52 @@ def setxor1d(ar1, ar2): flag2 = (flag[1:] == flag[:-1]) return aux[flag2] - -def setmember1d(ar1, ar2): +def in1d(ar1, ar2, assume_unique=False): """ - Return a boolean array set True where first element is in second array. + Test whether each element of an array is also present in a second + array. 
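The assume_unique flag that this commit threads through intersect1d, setxor1d, in1d and setdiff1d only skips the internal unique() calls; it is purely a speed switch, and passing non-unique data with it set can silently return wrong answers. A small sketch with the plain ndarray versions (assuming a numpy at least as new as this commit):

>>> import numpy as np
>>> np.intersect1d([1, 1, 2], [2])                        # duplicates handled
array([2])
>>> np.intersect1d([1, 1, 2], [2], assume_unique=True)    # 1 is wrongly reported
array([1, 2])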
See Also -------- - numpy.setmember1d : equivalent function for ndarrays. + numpy.in1d : equivalent function for ndarrays - """ - ar1 = ma.asanyarray(ar1) - ar2 = ma.asanyarray( ar2 ) - ar = ma.concatenate((ar1, ar2 )) - b1 = ma.zeros(ar1.shape, dtype = np.int8) - b2 = ma.ones(ar2.shape, dtype = np.int8) - tt = ma.concatenate((b1, b2)) - - # We need this to be a stable sort, so always use 'mergesort' here. The - # values from the first array should always come before the values from the - # second array. - perm = ar.argsort(kind='mergesort') - aux = ar[perm] - aux2 = tt[perm] -# flag = ediff1d( aux, 1 ) == 0 - flag = ma.concatenate((aux[1:] == aux[:-1], [False])) - ii = ma.where( flag * aux2 )[0] - aux = perm[ii+1] - perm[ii+1] = perm[ii] - perm[ii] = aux - # - indx = perm.argsort(kind='mergesort')[:len( ar1 )] - # - return flag[indx] + Notes + ----- + .. versionadded:: 1.4.0 + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate( (ar1, ar2) ) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + equal_adj = (sar[1:] == sar[:-1]) + flag = ma.concatenate( (equal_adj, [False] ) ) + indx = order.argsort(kind='mergesort')[:len( ar1 )] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] def union1d(ar1, ar2): """ - Union of 1D arrays with unique elements. + Union of two arrays. See also -------- numpy.union1d : equivalent function for ndarrays. """ - return unique1d(ma.concatenate((ar1, ar2))) + return unique(ma.concatenate((ar1, ar2))) -def setdiff1d(ar1, ar2): +def setdiff1d(ar1, ar2, assume_unique=False): """ Set difference of 1D arrays with unique elements. @@ -985,12 +1002,63 @@ def setdiff1d(ar1, ar2): numpy.setdiff1d : equivalent function for ndarrays """ - aux = setmember1d(ar1,ar2) + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + aux = in1d(ar1, ar2, assume_unique=True) if aux.size == 0: return aux else: return ma.asarray(ar1)[aux == 0] +@deprecate_with_doc('') +def unique1d(ar1, return_index=False, return_inverse=False): + """ This function is deprecated. Use ma.unique() instead. """ + output = np.unique1d(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + +@deprecate_with_doc('') +def intersect1d_nu(ar1, ar2): + """ This function is deprecated. Use ma.intersect1d() instead.""" + # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique1d(ar1), unique1d(ar2))) + aux.sort() + return aux[aux[1:] == aux[:-1]] + +@deprecate_with_doc('') +def setmember1d(ar1, ar2): + """ This function is deprecated. Use ma.in1d() instead.""" + ar1 = ma.asanyarray(ar1) + ar2 = ma.asanyarray( ar2 ) + ar = ma.concatenate((ar1, ar2 )) + b1 = ma.zeros(ar1.shape, dtype = np.int8) + b2 = ma.ones(ar2.shape, dtype = np.int8) + tt = ma.concatenate((b1, b2)) + + # We need this to be a stable sort, so always use 'mergesort' here. The + # values from the first array should always come before the values from the + # second array. 
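That stability requirement is the heart of the membership test: after a stable argsort of the concatenation, an element of ar1 is present in ar2 exactly when its right-hand sorted neighbour equals it (with unique inputs such a neighbour can only come from ar2), and inverting the permutation maps the flags back to ar1's original order. A condensed walk-through of in1d's core (assuming `np` is an imported numpy; variable names follow the patch):

>>> import numpy as np
>>> ar1 = np.array([5, 7, 1, 2])             # unique values
>>> ar2 = np.array([2, 4, 3, 1, 5])          # unique values
>>> ar = np.concatenate((ar1, ar2))
>>> order = ar.argsort(kind='mergesort')     # stable: ar1 entries sort first among equals
>>> sar = ar[order]
>>> flag = np.concatenate((sar[1:] == sar[:-1], [False]))
>>> flag[order.argsort(kind='mergesort')[:len(ar1)]].tolist()
[True, False, True, True]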
+ perm = ar.argsort(kind='mergesort') + aux = ar[perm] + aux2 = tt[perm] +# flag = ediff1d( aux, 1 ) == 0 + flag = ma.concatenate((aux[1:] == aux[:-1], [False])) + ii = ma.where( flag * aux2 )[0] + aux = perm[ii+1] + perm[ii+1] = perm[ii] + perm[ii] = aux + # + indx = perm.argsort(kind='mergesort')[:len( ar1 )] + # + return flag[indx] #####-------------------------------------------------------------------------- @@ -1302,7 +1370,7 @@ def flatnotmasked_contiguous(a): if len(unmasked) == 0: return None result = [] - for k, group in groupby(enumerate(unmasked), lambda (i,x):i-x): + for (k, group) in itertools.groupby(enumerate(unmasked), lambda (i,x):i-x): tmp = np.array([g[1] for g in group], int) # result.append((tmp.size, tuple(tmp[[0,-1]]))) result.append( slice(tmp[0], tmp[-1]) ) @@ -1347,6 +1415,73 @@ def notmasked_contiguous(a, axis=None): return result +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + #def clump_masked(a): + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] - mask[:-1]).nonzero() + idx = idx[0] + 1 + slices = [slice(left, right) + for (left, right) in zip(itertools.chain([0], idx), + itertools.chain(idx, [len(mask)]),)] + return slices + + +def clump_unmasked(a): + """ + Returns a list of slices corresponding to the unmasked clumps of a 1D array. + + Examples + -------- + >>> a = ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = ma.masked + >>> clump_unmasked(a) + [slice(3, 6, None), slice(7, 8, None)] + + .. versionadded:: 1.4.0 + """ + mask = getattr(a, '_mask', nomask) + if mask is nomask: + return [slice(0, a.size)] + slices = _ezclump(mask) + if a[0] is masked: + result = slices[1::2] + else: + result = slices[::2] + return result + + +def clump_masked(a): + """ + Returns a list of slices corresponding to the masked clumps of a 1D array. + + Examples + -------- + >>> a = ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = ma.masked + >>> clump_masked(a) + [slice(0, 3, None), slice(6, 7, None), slice(8, None, None)] + + .. versionadded:: 1.4.0 + """ + mask = ma.getmask(a) + if mask is nomask: + return [] + slices = _ezclump(mask) + if len(slices): + if a[0] is masked: + slices = slices[::2] + else: + slices = slices[1::2] + return slices + + + #####-------------------------------------------------------------------------- #---- Polynomial fit --- #####-------------------------------------------------------------------------- diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e994a67c6..dc37ff4b6 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -388,6 +388,14 @@ class TestMaskedArray(TestCase): assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) + def test_pickling_keepalignment(self): + "Tests pickling w/ F_CONTIGUOUS arrays" + import cPickle + a = arange(10) + a.shape = (-1, 2) + b = a.T + test = cPickle.loads(cPickle.dumps(b)) + assert_equal(test, b) def test_single_element_subscript(self): "Tests single element subscripts of Maskedarrays." 
@@ -660,6 +668,15 @@ class TestMaskedArrayArithmetic(TestCase): self.failUnless(minimum(xm, xm).mask) + def test_masked_singleton_equality(self): + "Tests (in)equality on masked singleton" + a = array([1, 2, 3], mask=[1, 1, 0]) + assert((a[0] == 0) is masked) + assert((a[0] != 0) is masked) + assert_equal((a[-1] == 0), False) + assert_equal((a[-1] != 0), True) + + def test_arithmetic_with_masked_singleton(self): "Checks that there's no collapsing to masked" x = masked_array([1,2]) @@ -775,6 +792,12 @@ class TestMaskedArrayArithmetic(TestCase): assert_equal(amaximum, np.maximum.outer(a,a)) + def test_minmax_reduce(self): + "Test np.min/maximum.reduce on array w/ full False mask" + a = array([1, 2, 3], mask=[False, False, False]) + b = np.maximum.reduce(a) + assert_equal(b, 3) + def test_minmax_funcs_with_output(self): "Tests the min/max functions with explicit outputs" mask = np.random.rand(12).round() @@ -1053,6 +1076,7 @@ class TestMaskedArrayArithmetic(TestCase): assert_equal(test.mask, control.mask) assert_equal(a.mask, [0, 0, 0, 0, 1]) + #------------------------------------------------------------------------------ class TestMaskedArrayAttributes(TestCase): @@ -1351,7 +1375,7 @@ class TestFillingValues(TestCase): assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) assert_equal(test[1], maximum_fill_value(a['B'])) - + #------------------------------------------------------------------------------ @@ -2315,7 +2339,7 @@ class TestMaskedArrayMethods(TestCase): #------------------------------------------------------------------------------ -class TestMaskArrayMathMethod(TestCase): +class TestMaskedArrayMathMethods(TestCase): def setUp(self): "Base data definition." @@ -2954,6 +2978,16 @@ class TestMaskedArrayFunctions(TestCase): control = np.array([ 0, 0, 0, 0, 0, 1], dtype=bool) assert_equal(test, control) + + def test_on_ndarray(self): + "Test functions on ndarrays" + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + #------------------------------------------------------------------------------ class TestMaskedFields(TestCase): diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 3c6de62be..c0532b081 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -70,26 +70,43 @@ class TestGeneric(TestCase): mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) test = masked_all_like(control) assert_equal(test, control) + + def test_clump_masked(self): + "Test clump_masked" + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + test = clump_masked(a) + control = [slice(0, 3), slice(6, 7), slice(8, 10)] + assert_equal(test, control) + + def test_clump_unmasked(self): + "Test clump_unmasked" + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + test = clump_unmasked(a) + control = [slice(3, 6), slice(7, 8),] + assert_equal(test, control) + class TestAverage(TestCase): "Several tests of average. Why so many? Good point..." def test_testAverage1(self): "Test of average."
- ott = array([0.,1.,2.,3.], mask=[1,0,0,0]) - assert_equal(2.0, average(ott,axis=0)) + ott = array([0.,1.,2.,3.], mask=[True, False, False, False]) + assert_equal(2.0, average(ott, axis=0)) assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) - result, wts = average(ott, weights=[1.,1.,2.,1.], returned=1) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) assert_equal(2.0, result) self.failUnless(wts == 4.0) ott[:] = masked - assert_equal(average(ott,axis=0).mask, [True]) - ott = array([0.,1.,2.,3.], mask=[1,0,0,0]) - ott = ott.reshape(2,2) + assert_equal(average(ott, axis=0).mask, [True]) + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + ott = ott.reshape(2, 2) ott[:,1] = masked - assert_equal(average(ott,axis=0), [2.0, 0.0]) - assert_equal(average(ott,axis=1).mask[0], [True]) + assert_equal(average(ott, axis=0), [2.0, 0.0]) + assert_equal(average(ott, axis=1).mask[0], [True]) assert_equal([2.,0.], average(ott, axis=0)) result, wts = average(ott, axis=0, returned=1) assert_equal(wts, [1., 0.]) @@ -105,43 +122,44 @@ class TestAverage(TestCase): assert_equal(average(y, None), np.add.reduce(np.arange(6))*3./12.) assert_equal(average(y, axis=0), np.arange(6) * 3./2.) assert_equal(average(y, axis=1), - [average(x,axis=0), average(x,axis=0) * 2.0]) + [average(x, axis=0), average(x, axis=0) * 2.0]) assert_equal(average(y, None, weights=w2), 20./6.) assert_equal(average(y, axis=0, weights=w2), [0.,1.,2.,3.,4.,10.]) assert_equal(average(y, axis=1), - [average(x,axis=0), average(x,axis=0) * 2.0]) + [average(x, axis=0), average(x, axis=0) * 2.0]) m1 = zeros(6) - m2 = [0,0,1,1,0,0] - m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]] + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = ones(6) m5 = [0, 1, 1, 1, 1, 1] - assert_equal(average(masked_array(x, m1),axis=0), 2.5) - assert_equal(average(masked_array(x, m2),axis=0), 2.5) - assert_equal(average(masked_array(x, m4),axis=0).mask, [True]) - assert_equal(average(masked_array(x, m5),axis=0), 0.0) - assert_equal(count(average(masked_array(x, m4),axis=0)), 0) + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) z = masked_array(y, m3) assert_equal(average(z, None), 20./6.) assert_equal(average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5]) assert_equal(average(z, axis=1), [2.5, 5.0]) - assert_equal(average(z,axis=0, weights=w2), [0.,1., 99., 99., 4.0, 10.0]) + assert_equal(average(z,axis=0, weights=w2), + [0.,1., 99., 99., 4.0, 10.0]) def test_testAverage3(self): "Yet more tests of average!" 
a = arange(6) b = arange(6) * 3 - r1, w1 = average([[a,b],[b,a]], axis=1, returned=1) + r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) assert_equal(shape(r1) , shape(w1)) assert_equal(r1.shape , w1.shape) - r2, w2 = average(ones((2,2,3)), axis=0, weights=[3,1], returned=1) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) assert_equal(shape(w2) , shape(r2)) - r2, w2 = average(ones((2,2,3)), returned=1) + r2, w2 = average(ones((2, 2, 3)), returned=1) assert_equal(shape(w2) , shape(r2)) - r2, w2 = average(ones((2,2,3)), weights=ones((2,2,3)), returned=1) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) assert_equal(shape(w2), shape(r2)) - a2d = array([[1,2],[0,4]], float) - a2dm = masked_array(a2d, [[0,0],[1,0]]) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False],[True, False]]) a2da = average(a2d, axis=0) assert_equal(a2da, [0.5, 3.0]) a2dma = average(a2dm, axis=0) @@ -151,8 +169,19 @@ class TestAverage(TestCase): a2dma = average(a2dm, axis=1) assert_equal(a2dma, [1.5, 4.0]) + def test_onintegers_with_mask(self): + "Test average on integers with mask" + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + class TestConcatenator(TestCase): - "Tests for mr_, the equivalent of r_ for masked arrays." + """ + Tests for mr_, the equivalent of r_ for masked arrays. + """ + def test_1d(self): "Tests mr_ on 1D arrays." assert_array_equal(mr_[1,2,3,4,5,6],array([1,2,3,4,5,6])) @@ -186,7 +215,10 @@ class TestConcatenator(TestCase): class TestNotMasked(TestCase): - "Tests notmasked_edges and notmasked_contiguous." + """ + Tests notmasked_edges and notmasked_contiguous. + """ + def test_edges(self): "Tests unmasked_edges" data = masked_array(np.arange(25).reshape(5, 5), @@ -222,7 +254,6 @@ class TestNotMasked(TestCase): assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) - def test_contiguous(self): "Tests notmasked_contiguous" a = masked_array(np.arange(24).reshape(3,8), @@ -248,7 +279,6 @@ class TestNotMasked(TestCase): - class Test2DFunctions(TestCase): "Tests 2D functions" def test_compress2d(self): @@ -573,19 +603,19 @@ class TestPolynomial(TestCase): class TestArraySetOps(TestCase): # - def test_unique1d_onlist(self): - "Test unique1d on list" + def test_unique_onlist(self): + "Test unique on list" data = [1, 1, 1, 2, 2, 3] - test = unique1d(data, return_index=True, return_inverse=True) + test = unique(data, return_index=True, return_inverse=True) self.failUnless(isinstance(test[0], MaskedArray)) assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) assert_equal(test[1], [0, 3, 5]) assert_equal(test[2], [0, 0, 0, 1, 1, 2]) - def test_unique1d_onmaskedarray(self): - "Test unique1d on masked data w/use_mask=True" + def test_unique_onmaskedarray(self): + "Test unique on masked data w/use_mask=True" data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) - test = unique1d(data, return_index=True, return_inverse=True) + test = unique(data, return_index=True, return_inverse=True) assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) assert_equal(test[1], [0, 3, 5, 2]) assert_equal(test[2], [0, 0, 3, 1, 3, 2]) @@ -593,26 +623,26 @@ class TestArraySetOps(TestCase): data.fill_value = 3 data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0], fill_value=3) - test = unique1d(data, return_index=True, return_inverse=True) + test = unique(data, return_index=True, return_inverse=True) 
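A recurring point in the average tests above is that masked entries drop out
of both the weighted sum and the sum of weights. For instance, a sketch using
the public ``numpy.ma`` interface::

    import numpy.ma as ma

    ott = ma.array([0., 1., 2., 3.], mask=[True, False, False, False])

    # The masked first element is ignored: (1 + 2 + 3) / 3
    ma.average(ott, axis=0)                     # 2.0

    # Masked entries contribute neither value nor weight:
    # (1*1 + 2*2 + 3*1) / (1 + 2 + 1)
    result, wts = ma.average(ott, weights=[1., 1., 2., 1.], returned=True)
    # result == 2.0, wts == 4.0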
         assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
         assert_equal(test[1], [0, 3, 5, 2])
         assert_equal(test[2], [0, 0, 3, 1, 3, 2])

-    def test_unique1d_allmasked(self):
+    def test_unique_allmasked(self):
         "Test all masked"
         data = masked_array([1, 1, 1], mask=True)
-        test = unique1d(data, return_index=True, return_inverse=True)
+        test = unique(data, return_index=True, return_inverse=True)
         assert_equal(test[0], masked_array([1,], mask=[True]))
         assert_equal(test[1], [0])
         assert_equal(test[2], [0, 0, 0])
         #
         "Test masked"
         data = masked
-        test = unique1d(data, return_index=True, return_inverse=True)
+        test = unique(data, return_index=True, return_inverse=True)
         assert_equal(test[0], masked_array(masked))
         assert_equal(test[1], [0])
         assert_equal(test[2], [0])
-
+
     def test_ediff1d(self):
         "Tests ediff1d on masked arrays"
         x = masked_array(np.arange(5), mask=[1,0,0,0,1])
@@ -689,15 +719,6 @@ class TestArraySetOps(TestCase):
         x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
         y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
         test = intersect1d(x, y)
-        control = array([1, 1, 3, 3, -1], mask=[0, 0, 0, 0, 1])
-        assert_equal(test, control)
-
-
-    def test_intersect1d_nu(self):
-        "Test intersect1d_nu"
-        x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
-        y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
-        test = intersect1d_nu(x, y)
         control = array([1, 3, -1], mask=[0, 0, 1])
         assert_equal(test, control)

@@ -705,7 +726,7 @@ class TestArraySetOps(TestCase):
     def test_setxor1d(self):
         "Test setxor1d"
         a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
-        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
         test = setxor1d(a, b)
         assert_equal(test, array([3, 4, 7]))
         #
@@ -729,30 +750,35 @@ class TestArraySetOps(TestCase):
         assert_array_equal([], setxor1d([],[]))

-    def test_setmember1d( self ):
-        "Test setmember1d"
+    def test_in1d( self ):
+        "Test in1d"
         a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
-        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
-        test = setmember1d(a, b)
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        test = in1d(a, b)
         assert_equal(test, [True, True, True, False, True])
         #
-        assert_array_equal([], setmember1d([],[]))
+        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 5, -1], mask=[0, 0, 1])
+        test = in1d(a, b)
+        assert_equal(test, [True, True, False, True, True])
+        #
+        assert_array_equal([], in1d([],[]))

     def test_union1d( self ):
         "Test union1d"
-        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
-        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+        a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
         test = union1d(a, b)
         control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
         assert_equal(test, control)
         #
-        assert_array_equal([], setmember1d([],[]))
+        assert_array_equal([], union1d([],[]))

     def test_setdiff1d( self ):
         "Test setdiff1d"
-        a = array([6, 5, 4, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 1])
+        a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
         b = array([2, 4, 3, 3, 2, 1, 5])
         test = setdiff1d(a, b)
         assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
@@ -769,8 +795,6 @@ class TestArraySetOps(TestCase):
         assert_array_equal(setdiff1d(a,b), np.array(['c']))

-
-
 class TestShapeBase(TestCase):
     #
     def test_atleast1d(self):
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 15788141c..e337c35e2 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -9,6 +9,8 @@ setup and teardown functions and
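In the set operations tested above, masked values compare equal to one
another, so each array contributes at most one masked "value" to the result.
A sketch of ``in1d`` (the function added to ``numpy.ma.extras`` earlier in
this commit) on masked input::

    import numpy.ma as ma

    a = ma.array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
    b = ma.array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])

    # The masked entry of `a` matches the masked entry of `b`:
    ma.in1d(a, b)       # [True, True, True, False, True]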
 so on - see nose.tools for more information.
 """

+import warnings
+import sys

 def slow(t):
     """Labels a test as 'slow'.
@@ -170,3 +172,114 @@ def knownfailureif(fail_condition, msg=None):
             return nose.tools.make_decorator(f)(knownfailer)

     return knownfail_decorator
+
+# The following two classes are copied from python 2.6 warnings module (context
+# manager)
+class WarningMessage(object):
+
+    """Holds the result of a single showwarning() call."""
+
+    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
+                        "line")
+
+    def __init__(self, message, category, filename, lineno, file=None,
+                 line=None):
+        local_values = locals()
+        for attr in self._WARNING_DETAILS:
+            setattr(self, attr, local_values[attr])
+        if category:
+            self._category_name = category.__name__
+        else:
+            self._category_name = None
+
+    def __str__(self):
+        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
+                "line : %r}" % (self.message, self._category_name,
+                                self.filename, self.lineno, self.line))
+
+class WarningManager:
+    """Context manager that copies, and on exit restores, the warnings filter
+    and the showwarning function (a minimal copy of Python 2.6's
+    warnings.catch_warnings)."""
+    def __init__(self, record=False, module=None):
+        self._record = record
+        if module is None:
+            self._module = sys.modules['warnings']
+        else:
+            self._module = module
+        self._entered = False
+
+    def __enter__(self):
+        if self._entered:
+            raise RuntimeError("Cannot enter %r twice" % self)
+        self._entered = True
+        self._filters = self._module.filters
+        self._module.filters = self._filters[:]
+        self._showwarning = self._module.showwarning
+        if self._record:
+            log = []
+            def showwarning(*args, **kwargs):
+                log.append(WarningMessage(*args, **kwargs))
+            self._module.showwarning = showwarning
+            return log
+        else:
+            return None
+
+    def __exit__(self, *exc_info):
+        if not self._entered:
+            raise RuntimeError("Cannot exit %r without entering first" % self)
+        self._module.filters = self._filters
+        self._module.showwarning = self._showwarning
+
+def deprecated(conditional=True):
+    """This decorator can be used to filter DeprecationWarnings, to avoid
+    printing them during the test suite run, while checking that the test
+    actually raises a DeprecationWarning.
+
+    Parameters
+    ----------
+    conditional : bool or callable
+        Flag to determine whether to mark the test as deprecated or not. If
+        the condition is a callable, it is used at runtime to dynamically
+        make the decision.
+
+    Returns
+    -------
+    decorator : function
+        Decorator, which, when applied to a function, checks that a
+        DeprecationWarning is raised when the function is called (if
+        `conditional` holds), and filters the warning so that it is not
+        printed during the test run.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.4.0
+    """
+    def deprecate_decorator(f):
+        # Local import to avoid a hard nose dependency and only incur the
+        # import time overhead at actual test-time.
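+        #
+        # Usage sketch (illustrative only, not part of this module): a test
+        # decorated with @deprecated() passes only if its body actually
+        # triggers a DeprecationWarning, which is recorded rather than
+        # printed, e.g.
+        #
+        #     @deprecated()
+        #     def test_old_api():
+        #         warnings.warn("old_api is deprecated", DeprecationWarning)
+        #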
+ import nose + from noseclasses import KnownFailureTest + + def _deprecated_imp(*args, **kwargs): + # Poor man's replacement for the with statement + ctx = WarningManager(record=True) + l = ctx.__enter__() + warnings.simplefilter('always') + try: + f(*args, **kwargs) + if not len(l) > 0: + raise AssertionError("No warning raised when calling %s" + % f.__name__) + if not l[0].category is DeprecationWarning: + raise AssertionError("First warning for %s is not a " \ + "DeprecationWarning( is %s)" % (f.__name__, l[0])) + finally: + ctx.__exit__() + + if callable(conditional): + cond = conditional() + else: + cond = conditional + if cond: + return nose.tools.make_decorator(f)(_deprecated_imp) + else: + return f + return deprecate_decorator diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index 7a10a5b1f..a818e5e62 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -115,18 +115,23 @@ class NoseTester(object): If None, extract calling module path Default is None ''' + package_name = None if package is None: f = sys._getframe(1) - package = f.f_locals.get('__file__', None) - assert package is not None - package = os.path.dirname(package) + package_path = f.f_locals.get('__file__', None) + assert package_path is not None + package_path = os.path.dirname(package_path) + package_name = f.f_locals.get('__name__', None) elif isinstance(package, type(os)): - package = os.path.dirname(package.__file__) - self.package_path = package + package_path = os.path.dirname(package.__file__) + package_name = getattr(package, '__name__', None) + self.package_path = package_path # find the package name under test; this name is used to limit coverage # reporting (if enabled) - self.package_name = get_package_name(package) + if package_name is None: + package_name = get_package_name(package_path) + self.package_name = package_name def _test_argv(self, label, verbose, extra_argv): ''' Generate argv for nosetest command diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index aabdc88a0..0ecf0622d 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -48,7 +48,7 @@ class _GenericTest(object): a = np.array([1, 1], dtype=np.object) self._test_equal(a, 1) -class TestEqual(_GenericTest, unittest.TestCase): +class TestArrayEqual(_GenericTest, unittest.TestCase): def setUp(self): self._assert_func = assert_array_equal @@ -126,11 +126,148 @@ class TestEqual(_GenericTest, unittest.TestCase): self._test_not_equal(c, b) +class TestEqual(TestArrayEqual): + def setUp(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(np.PZERO, np.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = 
np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + +class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): + def setUp(self): + self._assert_func = assert_array_almost_equal + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + self.failUnlessRaises(AssertionError, + lambda: self._assert_func(x, y, decimal=5)) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(anan, aone)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(anan, ainf)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(ainf, anan)) class TestAlmostEqual(_GenericTest, unittest.TestCase): def setUp(self): - self._assert_func = assert_array_almost_equal + self._assert_func = assert_almost_equal + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(np.nan, 1)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(np.nan, np.inf)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + +class TestApproxEqual(unittest.TestCase): + def setUp(self): + self._assert_func = assert_approx_equal + def test_simple_arrays(self): + x = np.array([1234.22]) + y = np.array([1234.23]) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + self.failUnlessRaises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + self.failUnlessRaises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(anan, aone)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(anan, ainf)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(anan, aone)) + self.failUnlessRaises(AssertionError, + lambda : self._assert_func(anan, ainf)) + self.failUnlessRaises(AssertionError, + lambda : 
self._assert_func(ainf, anan))

 class TestRaises(unittest.TestCase):
     def setUp(self):
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index ba9b16b18..96b2d462c 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -6,6 +6,7 @@ import os
 import sys
 import re
 import operator
+import types

 from nosetester import import_nose

 __all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
@@ -22,6 +23,56 @@ def assert_(val, msg='') :
     if not val :
         raise AssertionError(msg)

+def gisnan(x):
+    """Like isnan, but always raises an error if the type is not supported,
+    instead of returning NotImplemented.
+
+    Notes
+    -----
+    isnan and other ufuncs sometimes return NotImplemented instead of
+    raising any exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+    This should be removed once this problem is solved at the ufunc level."""
+    from numpy.core import isnan
+    st = isnan(x)
+    if isinstance(st, types.NotImplementedType):
+        raise TypeError("isnan not supported for this type")
+    return st
+
+def gisfinite(x):
+    """Like isfinite, but always raises an error if the type is not supported,
+    instead of returning NotImplemented.
+
+    Notes
+    -----
+    isfinite and other ufuncs sometimes return NotImplemented instead of
+    raising any exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+    This should be removed once this problem is solved at the ufunc level."""
+    from numpy.core import isfinite
+    st = isfinite(x)
+    if isinstance(st, types.NotImplementedType):
+        raise TypeError("isfinite not supported for this type")
+    return st
+
+def gisinf(x):
+    """Like isinf, but always raises an error if the type is not supported,
+    instead of returning NotImplemented.
+
+    Notes
+    -----
+    isinf and other ufuncs sometimes return NotImplemented instead of
+    raising any exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+    This should be removed once this problem is solved at the ufunc level."""
+    from numpy.core import isinf
+    st = isinf(x)
+    if isinstance(st, types.NotImplementedType):
+        raise TypeError("isinf not supported for this type")
+    return st

 def rand(*args):
     """Returns an array of random numbers with the given shape.
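The three wrappers above share one pattern: call the ufunc, and turn a
NotImplemented result into an explicit TypeError. In practice (a sketch;
``gisnan`` is a module-private helper, imported here only for illustration)::

    import numpy as np
    from numpy.testing.utils import gisnan

    gisnan(np.array([1.0, np.nan]))     # array([False,  True]), as np.isnan

    # For a dtype the ufunc cannot handle, np.isnan may return
    # NotImplemented instead of raising; gisnan converts that case
    # into TypeError("isnan not supported for this type").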
@@ -181,10 +232,69 @@ def assert_equal(actual,desired,err_msg='',verbose=True): for k in range(len(desired)): assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg), verbose) return - from numpy.core import ndarray + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag if isinstance(actual, ndarray) or isinstance(desired, ndarray): return assert_array_equal(actual, desired, err_msg, verbose) msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError("Items are not equal:\n" \ + "ACTUAL: %s\n" \ + "DESIRED: %s\n" % (str(actual), str(desired))) + + # Inf/nan/negative zero handling + try: + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan or isactnan: + if not (isdesnan and isactnan): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + elif desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + # If TypeError or ValueError raised while using isnan and co, just handle + # as before + except TypeError: + pass + except ValueError: + pass if desired != actual : raise AssertionError(msg) @@ -258,9 +368,55 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): """ from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError("Items are not equal:\n" \ + "ACTUAL: %s\n" \ + "DESIRED: %s\n" % (str(actual), str(desired))) + if isinstance(actual, ndarray) or isinstance(desired, ndarray): return assert_array_almost_equal(actual, desired, decimal, err_msg) - msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose, + header='Arrays are not almost equal') + try: + # If one of desired/actual is not finite, handle it 
specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + except TypeError: + pass if round(abs(desired - actual),decimal) != 0 : raise AssertionError(msg) @@ -317,12 +473,14 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): True """ - import math + import numpy as np actual, desired = map(float, (actual, desired)) if desired==actual: return # Normalized the numbers to be in range (-10.0,10.0) - scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = np.power(10,np.floor(np.log10(scale))) try: sc_desired = desired/scale except ZeroDivisionError: @@ -335,7 +493,21 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): header='Items are not equal to %d significant digits:' % significant, verbose=verbose) - if math.fabs(sc_desired - sc_actual) >= pow(10.,-(significant-1)) : + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + except TypeError: + pass + if np.abs(sc_desired - sc_actual) >= np.power(10.,-(significant-1)) : raise AssertionError(msg) def assert_array_compare(comparison, x, y, err_msg='', verbose=True, @@ -374,6 +546,10 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, '%s mismatch)' % (xnanid, ynanid), verbose=verbose, header=header, names=('x', 'y')) + raise AssertionError(msg) + # If only one item, it was a nan, so just return + if x.size == y.size == 1: + return val = comparison(x[~xnanid], y[~ynanid]) else: val = comparison(x,y) @@ -526,9 +702,22 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): y: array([ 1. , 2.33333, 5. ]) """ - from numpy.core import around, number, float_ + from numpy.core import around, number, float_, any from numpy.lib import issubdtype def compare(x, y): + try: + if any(gisinf(x)) or any( gisinf(y)): + xinfid = gisinf(x) + yinfid = gisinf(y) + if not xinfid == yinfid: + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except TypeError: + pass z = abs(x-y) if not issubdtype(z.dtype, number): z = z.astype(float_) # handle object arrays @@ -53,7 +53,7 @@ MINOR = 4 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) - + # Return the svn version as a string, raise a ValueError otherwise def svn_version(): def _minimal_ext_cmd(cmd): @@ -76,14 +76,15 @@ def svn_version(): return "" r = re.compile('Revision: ([0-9]+)') - svnver = None + svnver = "" for line in out.split('\n'): - m = r.match(line) + m = r.match(line.strip()) if m: svnver = m.group(1) if not svnver: - raise ValueError("Error while parsing svn version ?") + print("Error while parsing svn version") + return svnver # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly |
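To make the ``assert_approx_equal`` rescaling above concrete, here is the
arithmetic for the 1234.22 / 1234.23 case from the tests (a worked sketch)::

    import numpy as np

    desired, actual, significant = 1234.23, 1234.22, 7

    scale = 0.5 * (np.abs(desired) + np.abs(actual))      # 1234.225
    scale = np.power(10, np.floor(np.log10(scale)))       # 1000.0

    sc_desired = desired / scale                          # 1.23423
    sc_actual = actual / scale                            # 1.23422

    # |1.23423 - 1.23422| = 1e-5 is not below 10**-(7 - 1) = 1e-6, so the
    # assertion fails at significant=7 but passes at significant=6.
    np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1))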