From 5c86844c34674e3d580ac2cd12ef171e18130b13 Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Sat, 23 Aug 2008 23:17:23 +0000 Subject: Move documentation outside of source tree. Remove `doc` import from __init__. --- numpy/doc/CAPI.txt | 313 ----- numpy/doc/DISTUTILS.txt | 546 -------- numpy/doc/EXAMPLE_DOCSTRING.txt | 104 -- numpy/doc/HOWTO_BUILD_DOCS.txt | 71 - numpy/doc/HOWTO_DOCUMENT.txt | 430 ------ numpy/doc/README.txt | 15 - numpy/doc/__init__.py | 14 +- numpy/doc/basics.py | 137 ++ numpy/doc/broadcasting.py | 176 +++ numpy/doc/creation.py | 132 ++ numpy/doc/cython/MANIFEST | 2 - numpy/doc/cython/Makefile | 37 - numpy/doc/cython/README.txt | 20 - numpy/doc/cython/c_numpy.pxd | 136 -- numpy/doc/cython/c_python.pxd | 62 - numpy/doc/cython/numpyx.pyx | 127 -- numpy/doc/cython/run_test.py | 3 - numpy/doc/cython/setup.py | 49 - numpy/doc/example.py | 125 -- numpy/doc/glossary.py | 367 +++++ numpy/doc/howtofind.py | 9 + numpy/doc/html/api-objects.txt | 4 - numpy/doc/html/crarr.png | Bin 340 -> 0 bytes numpy/doc/html/epydoc.css | 322 ----- numpy/doc/html/epydoc.js | 293 ---- numpy/doc/html/example-module.html | 316 ----- numpy/doc/html/example-pysrc.html | 204 --- numpy/doc/html/frames.html | 17 - numpy/doc/html/help.html | 268 ---- numpy/doc/html/identifier-index.html | 180 --- numpy/doc/html/index.html | 17 - numpy/doc/html/module-tree.html | 101 -- numpy/doc/html/redirect.html | 38 - numpy/doc/html/toc-everything.html | 33 - numpy/doc/html/toc-example-module.html | 33 - numpy/doc/html/toc.html | 33 - numpy/doc/indexing.py | 384 ++++++ numpy/doc/internals.py | 162 +++ numpy/doc/io.py | 9 + numpy/doc/jargon.py | 9 + numpy/doc/methods_vs_functions.py | 9 + numpy/doc/misc.py | 9 + numpy/doc/newdtype_example/example.py | 16 - numpy/doc/newdtype_example/floatint.c | 153 --- numpy/doc/newdtype_example/floatint/__init__.py | 0 numpy/doc/newdtype_example/setup.py | 12 - numpy/doc/npy-format.txt | 294 ---- numpy/doc/pep_buffer.txt | 869 ------------ numpy/doc/performance.py | 9 + numpy/doc/pyrex/MANIFEST | 2 - numpy/doc/pyrex/Makefile | 9 - numpy/doc/pyrex/README.txt | 3 - numpy/doc/pyrex/c_numpy.pxd | 125 -- numpy/doc/pyrex/c_python.pxd | 20 - numpy/doc/pyrex/notes | 3 - numpy/doc/pyrex/numpyx.c | 1037 -------------- numpy/doc/pyrex/numpyx.pyx | 101 -- numpy/doc/pyrex/run_test.py | 3 - numpy/doc/pyrex/setup.py | 48 - numpy/doc/records.txt | 87 -- numpy/doc/reference/basics.py | 137 -- numpy/doc/reference/broadcasting.py | 176 --- numpy/doc/reference/creation.py | 132 -- numpy/doc/reference/glossary.py | 367 ----- numpy/doc/reference/howtofind.py | 9 - numpy/doc/reference/indexing.py | 384 ------ numpy/doc/reference/internals.py | 162 --- numpy/doc/reference/io.py | 9 - numpy/doc/reference/jargon.py | 9 - numpy/doc/reference/methods_vs_functions.py | 9 - numpy/doc/reference/misc.py | 9 - numpy/doc/reference/performance.py | 9 - numpy/doc/reference/structured_arrays.py | 176 --- numpy/doc/reference/ufuncs.py | 135 -- numpy/doc/structured_arrays.py | 176 +++ numpy/doc/swig/Makefile | 36 - numpy/doc/swig/README | 130 -- numpy/doc/swig/doc/Makefile | 51 - numpy/doc/swig/doc/numpy_swig.html | 1244 ----------------- numpy/doc/swig/doc/numpy_swig.pdf | Bin 168839 -> 0 bytes numpy/doc/swig/doc/numpy_swig.txt | 950 ------------- numpy/doc/swig/doc/testing.html | 482 ------- numpy/doc/swig/doc/testing.pdf | Bin 72439 -> 0 bytes numpy/doc/swig/doc/testing.txt | 173 --- numpy/doc/swig/numpy.i | 1634 ----------------------- numpy/doc/swig/pyfragments.swg | 174 --- numpy/doc/swig/test/Array.i | 107 -- 
numpy/doc/swig/test/Array1.cxx | 131 -- numpy/doc/swig/test/Array1.h | 55 - numpy/doc/swig/test/Array2.cxx | 168 --- numpy/doc/swig/test/Array2.h | 63 - numpy/doc/swig/test/Farray.cxx | 122 -- numpy/doc/swig/test/Farray.h | 56 - numpy/doc/swig/test/Farray.i | 73 - numpy/doc/swig/test/Fortran.cxx | 24 - numpy/doc/swig/test/Fortran.h | 21 - numpy/doc/swig/test/Fortran.i | 36 - numpy/doc/swig/test/Makefile | 34 - numpy/doc/swig/test/Matrix.cxx | 112 -- numpy/doc/swig/test/Matrix.h | 52 - numpy/doc/swig/test/Matrix.i | 45 - numpy/doc/swig/test/Tensor.cxx | 131 -- numpy/doc/swig/test/Tensor.h | 52 - numpy/doc/swig/test/Tensor.i | 49 - numpy/doc/swig/test/Vector.cxx | 100 -- numpy/doc/swig/test/Vector.h | 58 - numpy/doc/swig/test/Vector.i | 47 - numpy/doc/swig/test/setup.py | 66 - numpy/doc/swig/test/testArray.py | 283 ---- numpy/doc/swig/test/testFarray.py | 158 --- numpy/doc/swig/test/testFortran.py | 169 --- numpy/doc/swig/test/testMatrix.py | 361 ----- numpy/doc/swig/test/testTensor.py | 401 ------ numpy/doc/swig/test/testVector.py | 380 ------ numpy/doc/ufuncs.py | 135 ++ numpy/doc/ufuncs.txt | 103 -- 116 files changed, 1735 insertions(+), 16737 deletions(-) delete mode 100644 numpy/doc/CAPI.txt delete mode 100644 numpy/doc/DISTUTILS.txt delete mode 100644 numpy/doc/EXAMPLE_DOCSTRING.txt delete mode 100644 numpy/doc/HOWTO_BUILD_DOCS.txt delete mode 100644 numpy/doc/HOWTO_DOCUMENT.txt delete mode 100644 numpy/doc/README.txt create mode 100644 numpy/doc/basics.py create mode 100644 numpy/doc/broadcasting.py create mode 100644 numpy/doc/creation.py delete mode 100644 numpy/doc/cython/MANIFEST delete mode 100644 numpy/doc/cython/Makefile delete mode 100644 numpy/doc/cython/README.txt delete mode 100644 numpy/doc/cython/c_numpy.pxd delete mode 100644 numpy/doc/cython/c_python.pxd delete mode 100644 numpy/doc/cython/numpyx.pyx delete mode 100755 numpy/doc/cython/run_test.py delete mode 100755 numpy/doc/cython/setup.py delete mode 100644 numpy/doc/example.py create mode 100644 numpy/doc/glossary.py create mode 100644 numpy/doc/howtofind.py delete mode 100644 numpy/doc/html/api-objects.txt delete mode 100644 numpy/doc/html/crarr.png delete mode 100644 numpy/doc/html/epydoc.css delete mode 100644 numpy/doc/html/epydoc.js delete mode 100644 numpy/doc/html/example-module.html delete mode 100644 numpy/doc/html/example-pysrc.html delete mode 100644 numpy/doc/html/frames.html delete mode 100644 numpy/doc/html/help.html delete mode 100644 numpy/doc/html/identifier-index.html delete mode 100644 numpy/doc/html/index.html delete mode 100644 numpy/doc/html/module-tree.html delete mode 100644 numpy/doc/html/redirect.html delete mode 100644 numpy/doc/html/toc-everything.html delete mode 100644 numpy/doc/html/toc-example-module.html delete mode 100644 numpy/doc/html/toc.html create mode 100644 numpy/doc/indexing.py create mode 100644 numpy/doc/internals.py create mode 100644 numpy/doc/io.py create mode 100644 numpy/doc/jargon.py create mode 100644 numpy/doc/methods_vs_functions.py create mode 100644 numpy/doc/misc.py delete mode 100644 numpy/doc/newdtype_example/example.py delete mode 100644 numpy/doc/newdtype_example/floatint.c delete mode 100644 numpy/doc/newdtype_example/floatint/__init__.py delete mode 100644 numpy/doc/newdtype_example/setup.py delete mode 100644 numpy/doc/npy-format.txt delete mode 100644 numpy/doc/pep_buffer.txt create mode 100644 numpy/doc/performance.py delete mode 100644 numpy/doc/pyrex/MANIFEST delete mode 100644 numpy/doc/pyrex/Makefile delete mode 100644 numpy/doc/pyrex/README.txt 
delete mode 100644 numpy/doc/pyrex/c_numpy.pxd delete mode 100644 numpy/doc/pyrex/c_python.pxd delete mode 100644 numpy/doc/pyrex/notes delete mode 100644 numpy/doc/pyrex/numpyx.c delete mode 100644 numpy/doc/pyrex/numpyx.pyx delete mode 100755 numpy/doc/pyrex/run_test.py delete mode 100644 numpy/doc/pyrex/setup.py delete mode 100644 numpy/doc/records.txt delete mode 100644 numpy/doc/reference/basics.py delete mode 100644 numpy/doc/reference/broadcasting.py delete mode 100644 numpy/doc/reference/creation.py delete mode 100644 numpy/doc/reference/glossary.py delete mode 100644 numpy/doc/reference/howtofind.py delete mode 100644 numpy/doc/reference/indexing.py delete mode 100644 numpy/doc/reference/internals.py delete mode 100644 numpy/doc/reference/io.py delete mode 100644 numpy/doc/reference/jargon.py delete mode 100644 numpy/doc/reference/methods_vs_functions.py delete mode 100644 numpy/doc/reference/misc.py delete mode 100644 numpy/doc/reference/performance.py delete mode 100644 numpy/doc/reference/structured_arrays.py delete mode 100644 numpy/doc/reference/ufuncs.py create mode 100644 numpy/doc/structured_arrays.py delete mode 100644 numpy/doc/swig/Makefile delete mode 100644 numpy/doc/swig/README delete mode 100644 numpy/doc/swig/doc/Makefile delete mode 100644 numpy/doc/swig/doc/numpy_swig.html delete mode 100644 numpy/doc/swig/doc/numpy_swig.pdf delete mode 100644 numpy/doc/swig/doc/numpy_swig.txt delete mode 100644 numpy/doc/swig/doc/testing.html delete mode 100644 numpy/doc/swig/doc/testing.pdf delete mode 100644 numpy/doc/swig/doc/testing.txt delete mode 100644 numpy/doc/swig/numpy.i delete mode 100644 numpy/doc/swig/pyfragments.swg delete mode 100644 numpy/doc/swig/test/Array.i delete mode 100644 numpy/doc/swig/test/Array1.cxx delete mode 100644 numpy/doc/swig/test/Array1.h delete mode 100644 numpy/doc/swig/test/Array2.cxx delete mode 100644 numpy/doc/swig/test/Array2.h delete mode 100644 numpy/doc/swig/test/Farray.cxx delete mode 100644 numpy/doc/swig/test/Farray.h delete mode 100644 numpy/doc/swig/test/Farray.i delete mode 100644 numpy/doc/swig/test/Fortran.cxx delete mode 100644 numpy/doc/swig/test/Fortran.h delete mode 100644 numpy/doc/swig/test/Fortran.i delete mode 100644 numpy/doc/swig/test/Makefile delete mode 100644 numpy/doc/swig/test/Matrix.cxx delete mode 100644 numpy/doc/swig/test/Matrix.h delete mode 100644 numpy/doc/swig/test/Matrix.i delete mode 100644 numpy/doc/swig/test/Tensor.cxx delete mode 100644 numpy/doc/swig/test/Tensor.h delete mode 100644 numpy/doc/swig/test/Tensor.i delete mode 100644 numpy/doc/swig/test/Vector.cxx delete mode 100644 numpy/doc/swig/test/Vector.h delete mode 100644 numpy/doc/swig/test/Vector.i delete mode 100755 numpy/doc/swig/test/setup.py delete mode 100755 numpy/doc/swig/test/testArray.py delete mode 100755 numpy/doc/swig/test/testFarray.py delete mode 100644 numpy/doc/swig/test/testFortran.py delete mode 100755 numpy/doc/swig/test/testMatrix.py delete mode 100755 numpy/doc/swig/test/testTensor.py delete mode 100755 numpy/doc/swig/test/testVector.py create mode 100644 numpy/doc/ufuncs.py delete mode 100644 numpy/doc/ufuncs.txt (limited to 'numpy/doc') diff --git a/numpy/doc/CAPI.txt b/numpy/doc/CAPI.txt deleted file mode 100644 index 28738635e..000000000 --- a/numpy/doc/CAPI.txt +++ /dev/null @@ -1,313 +0,0 @@ -=============== -C-API for NumPy -=============== - -:Author: Travis Oliphant -:Discussions to: `numpy-discussion@scipy.org`__ -:Created: October 2005 - -__ http://www.scipy.org/Mailing_Lists - -The C API of NumPy is (mostly) 
backward compatible with Numeric. - -There are a few non-standard Numeric usages (that were not really part -of the API) that will need to be changed: - -* If you used any of the function pointers in the ``PyArray_Descr`` - structure you will have to modify your usage of those. First, - the pointers are all under the member named ``f``. So ``descr->cast`` - is now ``descr->f->cast``. In addition, the - casting functions have eliminated the strides argument (use - ``PyArray_CastTo`` if you need strided casting). All functions have - one or two ``PyArrayObject *`` arguments at the end. This allows the - flexible arrays and mis-behaved arrays to be handled. - -* The ``descr->zero`` and ``descr->one`` constants have been replaced with - function calls, ``PyArray_Zero``, and ``PyArray_One`` (be sure to read the - code and free the resulting memory if you use these calls). - -* If you passed ``array->dimensions`` and ``array->strides`` around - to functions, you will need to fix some code. These are now - ``npy_intp*`` pointers. On 32-bit systems there won't be a problem. - However, on 64-bit systems, you will need to make changes to avoid - errors and segfaults. - - -The header files ``arrayobject.h`` and ``ufuncobject.h`` contain many defines -that you may find useful. The files ``__ufunc_api.h`` and -``__multiarray_api.h`` contain the available C-API function calls with -their function signatures. - -All of these headers are installed to -``/site-packages/numpy/core/include`` - - -Getting arrays in C-code -========================= - -All new arrays can be created using ``PyArray_NewFromDescr``. A simple interface -equivalent to ``PyArray_FromDims`` is ``PyArray_SimpleNew(nd, dims, typenum)`` -and to ``PyArray_FromDimsAndData`` is -``PyArray_SimpleNewFromData(nd, dims, typenum, data)``. - -This is a very flexible function. - -:: - - PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, - int nd, npy_intp *dims, - npy_intp *strides, char *data, - int flags, PyObject *obj); - -``subtype`` : ``PyTypeObject *`` - The subtype that should be created (either pass in - ``&PyArray_Type``, ``&PyBigArray_Type``, or ``obj->ob_type``, - where ``obj`` is a an instance of a subtype (or subclass) of - ``PyArray_Type`` or ``PyBigArray_Type``). - -``descr`` : ``PyArray_Descr *`` - The type descriptor for the array. This is a Python object (this - function steals a reference to it). The easiest way to get one is - using ``PyArray_DescrFromType()``. If you want to use a - flexible size array, then you need to use - ``PyArray_DescrNewFromType()`` and set its ``elsize`` - paramter to the desired size. The typenum in both of these cases - is one of the ``PyArray_XXXX`` enumerated types. - -``nd`` : ``int`` - The number of dimensions (<``MAX_DIMS``) - -``*dims`` : ``npy_intp *`` - A pointer to the size in each dimension. Information will be - copied from here. - -``*strides`` : ``npy_intp *`` - The strides this array should have. For new arrays created by this - routine, this should be ``NULL``. If you pass in memory for this array - to use, then you can pass in the strides information as well - (otherwise it will be created for you and default to C-contiguous - or Fortran contiguous). Any strides will be copied into the array - structure. Do not pass in bad strides information!!!! - - ``PyArray_CheckStrides(...)`` can help but you must call it if you are - unsure. You cannot pass in strides information when data is ``NULL`` - and this routine is creating its own memory. 
- -``*data`` : ``char *`` - ``NULL`` for creating brand-new memory. If you want this array to wrap - another memory area, then pass the pointer here. You are - responsible for deleting the memory in that case, but do not do so - until the new array object has been deleted. The best way to - handle that is to get the memory from another Python object, - ``INCREF`` that Python object after passing it's data pointer to this - routine, and set the ``->base`` member of the returned array to the - Python object. *You are responsible for* setting ``PyArray_BASE(ret)`` - to the base object. Failure to do so will create a memory leak. - - If you pass in a data buffer, the ``flags`` argument will be the flags - of the new array. If you create a new array, a non-zero flags - argument indicates that you want the array to be in Fortran order. - -``flags`` : ``int`` - Either the flags showing how to interpret the data buffer passed - in, or if a new array is created, nonzero to indicate a Fortran - order array. See below for an explanation of the flags. - -``obj`` : ``PyObject *`` - If subtypes is ``&PyArray_Type`` or ``&PyBigArray_Type``, this argument is - ignored. Otherwise, the ``__array_finalize__`` method of the subtype - is called (if present) and passed this object. This is usually an - array of the type to be created (so the ``__array_finalize__`` method - must handle an array argument. But, it can be anything...) - -Note: The returned array object will be unitialized unless the type is -``PyArray_OBJECT`` in which case the memory will be set to ``NULL``. - -``PyArray_SimpleNew(nd, dims, typenum)`` is a drop-in replacement for -``PyArray_FromDims`` (except it takes ``npy_intp*`` dims instead of ``int*`` dims -which matters on 64-bit systems) and it does not initialize the memory -to zero. - -``PyArray_SimpleNew`` is just a macro for ``PyArray_New`` with default arguments. -Use ``PyArray_FILLWBYTE(arr, 0)`` to fill with zeros. - -The ``PyArray_FromDims`` and family of functions are still available and -are loose wrappers around this function. These functions still take -``int *`` arguments. This should be fine on 32-bit systems, but on 64-bit -systems you may run into trouble if you frequently passed -``PyArray_FromDims`` the dimensions member of the old ``PyArrayObject`` structure -because ``sizeof(npy_intp) != sizeof(int)``. - - -Getting an arrayobject from an arbitrary Python object -====================================================== - -``PyArray_FromAny(...)`` - -This function replaces ``PyArray_ContiguousFromObject`` and friends (those -function calls still remain but they are loose wrappers around the -``PyArray_FromAny`` call). - -:: - - static PyObject * - PyArray_FromAny(PyObject *op, PyArray_Descr *dtype, int min_depth, - int max_depth, int requires, PyObject *context) - - -``op`` : ``PyObject *`` - The Python object to "convert" to an array object - -``dtype`` : ``PyArray_Descr *`` - The desired data-type descriptor. This can be ``NULL``, if the - descriptor should be determined by the object. Unless ``FORCECAST`` is - present in ``flags``, this call will generate an error if the data - type cannot be safely obtained from the object. - -``min_depth`` : ``int`` - The minimum depth of array needed or 0 if doesn't matter - -``max_depth`` : ``int`` - The maximum depth of array allowed or 0 if doesn't matter - -``requires`` : ``int`` - A flag indicating the "requirements" of the returned array. These - are the usual ndarray flags (see `NDArray flags`_ below). 
In - addition, there are three flags used only for the ``FromAny`` - family of functions: - - - ``ENSURECOPY``: always copy the array. Returned arrays always - have ``CONTIGUOUS``, ``ALIGNED``, and ``WRITEABLE`` set. - - ``ENSUREARRAY``: ensure the returned array is an ndarray (or a - bigndarray if ``op`` is one). - - ``FORCECAST``: cause a cast to occur regardless of whether or - not it is safe. - -``context`` : ``PyObject *`` - If the Python object ``op`` is not an numpy array, but has an - ``__array__`` method, context is passed as the second argument to - that method (the first is the typecode). Almost always this - parameter is ``NULL``. - - -``PyArray_ContiguousFromAny(op, typenum, min_depth, max_depth)`` is -equivalent to ``PyArray_ContiguousFromObject(...)`` (which is still -available), except it will return the subclass if op is already a -subclass of the ndarray. The ``ContiguousFromObject`` version will -always return an ndarray (or a bigndarray). - -Passing Data Type information to C-code -======================================= - -All datatypes are handled using the ``PyArray_Descr *`` structure. -This structure can be obtained from a Python object using -``PyArray_DescrConverter`` and ``PyArray_DescrConverter2``. The former -returns the default ``PyArray_LONG`` descriptor when the input object -is None, while the latter returns ``NULL`` when the input object is ``None``. - -See the ``arraymethods.c`` and ``multiarraymodule.c`` files for many -examples of usage. - -Getting at the structure of the array. --------------------------------------- - -You should use the ``#defines`` provided to access array structure portions: - -- ``PyArray_DATA(obj)`` : returns a ``void *`` to the array data -- ``PyArray_BYTES(obj)`` : return a ``char *`` to the array data -- ``PyArray_ITEMSIZE(obj)`` -- ``PyArray_NDIM(obj)`` -- ``PyArray_DIMS(obj)`` -- ``PyArray_DIM(obj, n)`` -- ``PyArray_STRIDES(obj)`` -- ``PyArray_STRIDE(obj,n)`` -- ``PyArray_DESCR(obj)`` -- ``PyArray_BASE(obj)`` - -see more in ``arrayobject.h`` - - -NDArray Flags -============= - -The ``flags`` attribute of the ``PyArrayObject`` structure contains important -information about the memory used by the array (pointed to by the data member) -This flags information must be kept accurate or strange results and even -segfaults may result. - -There are 6 (binary) flags that describe the memory area used by the -data buffer. These constants are defined in ``arrayobject.h`` and -determine the bit-position of the flag. Python exposes a nice attribute- -based interface as well as a dictionary-like interface for getting -(and, if appropriate, setting) these flags. - -Memory areas of all kinds can be pointed to by an ndarray, necessitating -these flags. If you get an arbitrary ``PyArrayObject`` in C-code, -you need to be aware of the flags that are set. -If you need to guarantee a certain kind of array -(like ``NPY_CONTIGUOUS`` and ``NPY_BEHAVED``), then pass these requirements into the -PyArray_FromAny function. - - -``NPY_CONTIGUOUS`` - True if the array is (C-style) contiguous in memory. -``NPY_FORTRAN`` - True if the array is (Fortran-style) contiguous in memory. - -Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN`` contiguous -and C contiguous. Both of these flags can be checked and are convenience -flags only as whether or not an array is ``NPY_CONTIGUOUS`` or ``NPY_FORTRAN`` -can be determined by the ``strides``, ``dimensions``, and ``itemsize`` -attributes. 
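As a quick Python-level illustration (an aside, not part of the C-API text itself: the flag
bits above surface as the ``flags`` attribute of an ndarray), the remark about 1-d arrays
can be checked interactively::

    >>> import numpy as np
    >>> a = np.arange(5)            # a contiguous 1-d array
    >>> bool(a.flags['C_CONTIGUOUS']), bool(a.flags['F_CONTIGUOUS'])
    (True, True)
    >>> b = np.arange(10)[::2]      # a strided slice is neither
    >>> bool(b.flags['C_CONTIGUOUS']), bool(b.flags['F_CONTIGUOUS'])
    (False, False)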
- -``NPY_OWNDATA`` - True if the array owns the memory (it will try and free it using - ``PyDataMem_FREE()`` on deallocation --- so it better really own it). - -These three flags facilitate using a data pointer that is a memory-mapped -array, or part of some larger record array. But, they may have other uses... - -``NPY_ALIGNED`` - True if the data buffer is aligned for the type and the strides - are multiples of the alignment factor as well. This can be - checked. - -``NPY_WRITEABLE`` - True only if the data buffer can be "written" to. - -``NPY_UPDATEIFCOPY`` - This is a special flag that is set if this array represents a copy - made because a user required certain flags in ``PyArray_FromAny`` and - a copy had to be made of some other array (and the user asked for - this flag to be set in such a situation). The base attribute then - points to the "misbehaved" array (which is set read_only). When - the array with this flag set is deallocated, it will copy its - contents back to the "misbehaved" array (casting if necessary) and - will reset the "misbehaved" array to ``WRITEABLE``. If the - "misbehaved" array was not ``WRITEABLE`` to begin with then - ``PyArray_FromAny`` would have returned an error because ``UPDATEIFCOPY`` - would not have been possible. - - -``PyArray_UpdateFlags(obj, flags)`` will update the ``obj->flags`` for -``flags`` which can be any of ``NPY_CONTIGUOUS``, ``NPY_FORTRAN``, ``NPY_ALIGNED``, or -``NPY_WRITEABLE``. - -Some useful combinations of these flags: - -- ``NPY_BEHAVED = NPY_ALIGNED | NPY_WRITEABLE`` -- ``NPY_CARRAY = NPY_DEFAULT = NPY_CONTIGUOUS | NPY_BEHAVED`` -- ``NPY_CARRAY_RO = NPY_CONTIGUOUS | NPY_ALIGNED`` -- ``NPY_FARRAY = NPY_FORTRAN | NPY_BEHAVED`` -- ``NPY_FARRAY_RO = NPY_FORTRAN | NPY_ALIGNED`` - -The macro ``PyArray_CHECKFLAGS(obj, flags)`` can test any combination of flags. -There are several default combinations defined as macros already -(see ``arrayobject.h``) - -In particular, there are ``ISBEHAVED``, ``ISBEHAVED_RO``, ``ISCARRAY`` -and ``ISFARRAY`` macros that also check to make sure the array is in -native byte order (as determined) by the data-type descriptor. - -There are more C-API enhancements which you can discover in the code, -or buy the book (http://www.trelgol.com) diff --git a/numpy/doc/DISTUTILS.txt b/numpy/doc/DISTUTILS.txt deleted file mode 100644 index b2027e619..000000000 --- a/numpy/doc/DISTUTILS.txt +++ /dev/null @@ -1,546 +0,0 @@ -.. -*- rest -*- - -NumPy Distutils - Users Guide -============================= - -:Author: Pearu Peterson -:Discussions to: scipy-dev@scipy.org -:Created: October 2005 -:Revision: $LastChangedRevision$ -:SVN source: $HeadURL$ - -.. contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy (previously called SciPy core) --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy.core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. - -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. 
Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. -Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - - + ``setup.py`` --- building script - + ``info.py`` --- contains documentation and import flags - + ``__init__.py`` --- package initializer - + ``tests/`` --- directory of unittests - -Their contents are described below. - -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. - -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure Scipy package:: - - #!/usr/bin/env python - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specifiy the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. - -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. - -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. - -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. 
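As a sketch only (``mypackage`` and the parent package name in the comments are
hypothetical), these attributes are typically consulted from within the
``configuration`` function itself::

    #!/usr/bin/env python
    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('mypackage', parent_package, top_path)
        # The attributes described above are now available:
        #   config.name       -- e.g. 'parentpkg.mypackage'
        #   config.local_path -- directory containing this setup.py
        #   config.top_path   -- location of the main setup.py script
        return config

    if __name__ == "__main__":
        from numpy.distutils.core import setup
        setup(configuration=configuration)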
- -``Configuration`` instance methods ----------------------------------- - -+ ``config.todict()`` --- returns configuration dictionary suitable for - passing to ``numpy.distutils.core.setup(..)`` function. - -+ ``config.paths(*paths) --- applies ``glob.glob(..)`` to items of - ``paths`` if necessary. Fixes ``paths`` item that is relative to - ``config.local_path``. - -+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` --- - returns a list of subpackage configurations. Subpackage is looked in the - current directory under the name ``subpackage_name`` but the path - can be specified also via optional ``subpackage_path`` argument. - If ``subpackage_name`` is specified as ``None`` then the subpackage - name will be taken the basename of ``subpackage_path``. - Any ``*`` used for subpackage names are expanded as wildcards. - -+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` --- - add SciPy subpackage configuration to the current one. The meaning - and usage of arguments is explained above, see - ``config.get_subpackage()`` method. - -+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files`` - list. If ``files`` item is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data - files. By default data files are copied under package installation - directory. For example, - - :: - - config.add_data_files('foo.dat', - ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']), - 'bar/car.dat'. - '/full/path/to/can.dat', - ) - - will install data files to the following locations - - :: - - / - foo.dat - fun/ - gun.dat - pun.dat - sun.dat - bar/ - car.dat - can.dat - - Path to data files can be a function taking no arguments and - returning path(s) to data files -- this is a useful when data files - are generated while building the package. (XXX: explain the step - when this function are called exactly) - -+ ``config.add_data_dir(data_path)`` --- add directory ``data_path`` - recursively to ``data_files``. The whole directory tree starting at - ``data_path`` will be copied under package installation directory. - If ``data_path`` is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data directory. - By default, data directory are copied under package installation - directory under the basename of ``data_path``. For example, - - :: - - config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat - config.add_data_dir(('sun','fun')) - config.add_data_dir(('gun','/full/path/to/fun')) - - will install data files to the following locations - - :: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - bar/ - car.dat - -+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to - ``include_dirs`` list. This list will be visible to all extension - modules of the current package. - -+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under - ``/include/pythonX.X//`` - directory. If ``files`` item is a tuple then it's first argument - specifies the installation suffix relative to - ``/include/pythonX.X/`` path. This is a Python distutils - method; its use is discouraged for NumPy and SciPy in favour of - ``config.add_data_files(*files)``. - -+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts`` - list. 
Scripts will be installed under ``/bin/`` directory. - -+ ``config.add_extension(name,sources,*kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument - ``name`` defines the name of the extension module that will be - installed under ``config.name`` package. The second argument is - a list of sources. ``add_extension`` method takes also keyword - arguments that are passed on to the ``Extension`` constructor. - The list of allowed keywords is the following: ``include_dirs``, - ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``, - ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``, - ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``, - ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``. - - Note that ``config.paths`` method is applied to all lists that - may contain paths. ``extra_info`` is a dictionary or a list - of dictionaries that content will be appended to keyword arguments. - The list ``depends`` contains paths to files or directories - that the sources of the extension module depend on. If any path - in the ``depends`` list is newer than the extension module, then - the module will be rebuilt. - - The list of sources may contain functions ('source generators') - with a pattern ``def (ext, build_dir): return - ``. If ``funcname`` returns ``None``, no sources - are generated. And if the ``Extension`` instance has no sources - after processing all source generators, no extension module will - be built. This is the recommended way to conditionally define - extension modules. Source generator functions are called by the - ``build_src`` command of ``numpy.distutils``. - - For example, here is a typical source generator function:: - - def generate_source(ext,build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'somesource.c') - if newer(target,__file__): - # create target file - return target - - The first argument contains the Extension instance that can be - useful to access its attributes like ``depends``, ``sources``, - etc. lists and modify them during the building process. - The second argument gives a path to a build directory that must - be used when creating files to a disk. - -+ ``config.add_library(name, sources, **build_info)`` --- add - a library to ``libraries`` list. Allowed keywords arguments - are ``depends``, ``macros``, ``include_dirs``, - ``extra_compiler_args``, ``f2py_options``. See ``.add_extension()`` - method for more information on arguments. - -+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled succesfully). - -+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled succesfully). - -+ ``config.get_version()`` --- return version string of the current package, - ``None`` if version information could not be detected. This methods - scans files ``__version__.py``, ``_version.py``, - ``version.py``, ``__svn_version__.py`` for string variables - ``version``, ``__version__``, ``_version``. - -+ ``config.make_svn_version_py()`` --- appends a data function to - ``data_files`` list that will generate ``__svn_version__.py`` file - to the current package directory. The file will be removed from - the source directory when Python exits. - -+ ``config.get_build_temp_dir()`` --- return a path to a temporary - directory. This is the place where one should build temporary - files. 
- -+ ``config.get_distribution()`` --- return distutils ``Distribution`` - instance. - -+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config - command instance. - -+ ``config.get_info(*names)`` --- - -Template files --------------- - -XXX: Describe how files with extensions ``.f.src``, ``.pyf.src``, -``.c.src``, etc. are pre-processed by the ``build_src`` command. - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. - -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. - -+ ``njoin(*path)`` --- join pathname components + convert ``/``-separated path - to ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- returns ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return frame object from call stack with given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return path of a module - relative to parent_path when given. Handles also ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. - -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``info.py`` file -'''''''''''''''''''' - -Scipy package import hooks assume that each package contains a -``info.py`` file. 
This file contains overall documentation about the package -and variables defining the order of package imports, dependency -relations between packages, etc. - -On import, the following information will be looked for in ``info.py``: - -__doc__ - The documentation string of the package. - -__doc_title__ - The title of the package. If not defined then the first non-empty - line of ``__doc__`` will be used. - -__all__ - List of symbols that package exports. Optional. - -global_symbols - List of names that should be imported to numpy name space. To import - all symbols to ``numpy`` namespace, define ``global_symbols=['*']``. - -depends - List of names that the package depends on. Prefix ``numpy.`` - will be automatically added to package names. For example, - use ``testing`` to indicate dependence on ``numpy.testing`` - package. Default value is ``[]``. - -postpone_import - Boolean variable indicating that importing the package should be - postponed until the first attempt of its usage. Default value is ``False``. - Depreciated. - -The ``__init__.py`` file -'''''''''''''''''''''''' - -To speed up the import time and minimize memory usage, numpy -uses ``ppimport`` hooks to transparently postpone importing large modules, -which might not be used during the Scipy session. In order to -have access to the documentation of all Scipy packages, including -postponed packages, the docstring from ``info.py`` is imported -into ``__init__.py``. - -The header of a typical ``__init__.py`` is:: - - # - # Package ... - ... - # - - from info import __doc__ - ... - - from numpy.testing import NumpyTest - test = NumpyTest().test - -The ``tests/`` directory -'''''''''''''''''''''''' - -Ideally, every Python code, extension module, or subpackage in Scipy -package directory should have the corresponding ``test_.py`` -file in ``tests/`` directory. This file should define classes -derived from the ``numpy.testing.TestCase`` class (or from -``unittest.TestCase``) and have names starting with ``test``. The methods -of these classes whose names contain ``test`` or start with ``bench`` are -automatically picked up by the test machinery. - -A minimal example of a ``test_yyy.py`` file that implements tests for -a NumPy package module ``numpy.xxx.yyy`` containing a function -``zzz()``, is shown below:: - - import sys - from numpy.testing import * - - # import xxx symbols - from numpy.xxx.yyy import zzz - - - class test_zzz(TestCase): - def test_simple(self, level=1): - assert zzz()=='Hello from zzz' - #... - - if __name__ == "__main__": - run_module_tests(file) - -Note that all classes that are inherited from ``TestCase`` class, are -automatically picked up by the test runner. 
- -``numpy.testing`` module provides also the following convenience -functions:: - - assert_equal(actual,desired,err_msg='',verbose=1) - assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=1) - assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=1) - assert_array_equal(x,y,err_msg='') - assert_array_almost_equal(x,y,decimal=6,err_msg='') - rand(*shape) # returns random array with a given shape - -To run all test scripts of the module ``xxx``, execute in Python: - - >>> import numpy - >>> numpy.xxx.test() - -To run only tests for ``xxx.yyy`` module, execute: - - >>> NumpyTest('xxx.yyy').test(level=1,verbosity=1) - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifing config_fc options for libraries in setup.py script ------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags. - -It's recommended to specify only those config_fc options in such a way -that are compiler independent. - -Getting extra Fortran 77 compiler options from source ------------------------------------------------------ - -Some old Fortran codes need special compiler options in order to -work correctly. In order to specify compiler options per source -file, ``numpy.distutils`` Fortran compiler looks for the following -pattern:: - - CF77FLAGS() = - -in the first 20 lines of the source and use the ``f77flags`` for -specified type of the fcompiler (the first character ``C`` is optional). - -TODO: This feature can be easily extended for Fortran 90 codes as -well. Let us know if you would need such a feature. diff --git a/numpy/doc/EXAMPLE_DOCSTRING.txt b/numpy/doc/EXAMPLE_DOCSTRING.txt deleted file mode 100644 index ee1326474..000000000 --- a/numpy/doc/EXAMPLE_DOCSTRING.txt +++ /dev/null @@ -1,104 +0,0 @@ -.. Here follows an example docstring for a C-function. Note that the - signature is given. This is done only for functions written is C, - since Python cannot find their signature by inspection. For all - other functions, start with the one line description. - - -multivariate_normal(mean, cov[, shape]) - -Draw samples from a multivariate normal distribution. - -The multivariate normal, multinormal or Gaussian distribution is a -generalisation of the one-dimensional normal distribution to higher -dimensions. - -Such a distribution is specified by its mean and covariance matrix, -which are analogous to the mean (average or "centre") and variance -(standard deviation squared or "width") of the one-dimensional normal -distribution. - -Parameters ----------- -mean : (N,) ndarray - Mean of the N-dimensional distribution. -cov : (N,N) ndarray - Covariance matrix of the distribution. -shape : tuple of ints, optional - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because each - sample is N-dimensional, the output shape is (m,n,k,N). If no - shape is specified, a single sample is returned. - -Returns -------- -out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. 
- -See Also --------- -normal -scipy.stats.distributions.norm : Provides random variates, as well as - probability density function, cumulative - density function, etc. - -Notes ------ -The mean is a coordinate in N-dimensional space, which represents the -location where samples are most likely to be generated. This is -analogous to the peak of the bell curve for the one-dimensional or -univariate normal distribution. - -Covariance indicates the level to which two variables vary together. -From the multivariate normal distribution, we draw N-dimensional -samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix -element :math:`C_ij` is the covariance of :math:`x_i` and :math:`x_j`. -The element :math:`C_ii` is the variance of :math:`x_i` (i.e. its -"spread"). - -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements, and only on - the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis ->>> x,y = np.random.multivariate_normal(mean,cov,5000).T - ->>> import matplotlib.pyplot as plt ->>> plt.plot(x,y,'x'); plt.axis('equal'); pyplot.show() - -Note that the covariance matrix must be non-negative definite. - -References ----------- -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -Examples --------- ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = np.random.multivariate_normal(mean,cov,(3,3)) ->>> x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] - -.. index: - :refguide: random:distributions diff --git a/numpy/doc/HOWTO_BUILD_DOCS.txt b/numpy/doc/HOWTO_BUILD_DOCS.txt deleted file mode 100644 index 9b1cca2f0..000000000 --- a/numpy/doc/HOWTO_BUILD_DOCS.txt +++ /dev/null @@ -1,71 +0,0 @@ -========================================= -Building the NumPy API and reference docs -========================================= - -Using Sphinx_ -------------- -`Download `_ -the builder. Follow the instructions in ``README.txt``. - - -Using Epydoc_ -------------- - -Currently, we recommend that you build epydoc from the trunk:: - - svn co https://epydoc.svn.sf.net/svnroot/epydoc/trunk/epydoc epydoc - cd epydoc/src - sudo python setup.py install - -The appearance of some elements can be changed in the epydoc.css -style sheet. - -Emphasized text appearance can be controlled by the definition of the -tag. For instance, to make them bold, insert:: - - em {font-weight: bold;} - -The variables' types are in a span of class rst-classifier, hence can be -changed by inserting something like:: - - span.rst-classifier {font-weight: normal;} - -The first line of the signature should **not** copy the signature unless -the function is written in C, in which case it is mandatory. If the function -signature is generic (uses ``*args`` or ``**kwds``), then a function signature -may be included. - -Use optional in the "type" field for parameters that are non-keyword -optional for C-functions. - -Epydoc depends on Docutils for reStructuredText parsing. You can -download Docutils from the `Docutils sourceforge -page. `_. 
The version in SVN is -broken, so use 0.4 or the patched version from Debian. You may also -be able to use a package manager like yum to install it:: - - $ sudo yum install python-docutils - - -Example -------- -Here is a short example module, -`plain text `_ -or -`rendered `_ in HTML. - -To try this yourself, simply download the example.py:: - - svn co http://svn.scipy.org/svn/numpy/trunk/numpy/doc/example.py . - -Then, run epydoc:: - - $ epydoc --docformat=restructuredtext example.py - -The output is placed in ``./html``, and may be viewed by loading the -``index.html`` file into your browser. - - - -.. _epydoc: http://epydoc.sourceforge.net/ -.. _sphinx: http://sphinx.pocoo.org diff --git a/numpy/doc/HOWTO_DOCUMENT.txt b/numpy/doc/HOWTO_DOCUMENT.txt deleted file mode 100644 index 03c35283d..000000000 --- a/numpy/doc/HOWTO_DOCUMENT.txt +++ /dev/null @@ -1,430 +0,0 @@ -==================================== -A Guide to NumPy/SciPy Documentation -==================================== - -.. Contents:: - -.. Note:: - - For an accompanying example, see `example.py - `_. - -Overview --------- -In general, we follow the standard Python style conventions as described here: - * `Style Guide for C Code `_ - * `Style Guide for Python Code `_ - * `Docstring Conventions `_ - -Additional PEPs of interest regarding documentation of code: - * `Docstring Processing Framework `_ - * `Docutils Design Specification `_ - -Use a code checker: - * `pylint `_ - * `pyflakes` easy_install pyflakes - * `pep8.py `_ - -The following import conventions are used throughout the NumPy source -and documentation:: - - import numpy as np - import scipy as sp - import matplotlib as mpl - import matplotlib.pyplot as plt - -It is not necessary to do ``import numpy as np`` at the beginning of -an example. However, some sub-modules, such as ``fft``, are not -imported by default, and you have to include them explicitly:: - - import numpy.fft - -after which you may use it:: - - np.fft.fft2(...) - -Docstring Standard ------------------- -A documentation string (docstring) is a string that describes a module, -function, class, or method definition. The docstring is a special attribute -of the object (``object.__doc__``) and, for consistency, is surrounded by -triple double quotes, i.e.:: - - """This is the form of a docstring. - - It can be spread over several lines. - - """ - -NumPy, SciPy_, and the scikits follow a common convention for -docstrings that provides for consistency, while also allowing our -toolchain to produce well-formatted reference guides. This document -describes the current community consensus for such a standard. If you -have suggestions for improvements, post them on the `numpy-discussion -list`_, together with the epydoc output. - -Our docstring standard uses `re-structured text (reST) -`_ syntax and is rendered -using tools like epydoc_ or sphinx_ (pre-processors that understand -the particular documentation style we are using). While a rich set of -markup is available, we limit ourselves to a very basic subset, in -order to provide docstrings that are easy to read on text-only -terminals. - -A guiding principle is that human readers of the text are given -precedence over contorting docstrings so our tools produce nice -output. Rather than sacrificing the readability of the docstrings, we -have written pre-processors to assist tools like epydoc_ and sphinx_ in -their task. 
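To give a feel for the result before the individual sections are described
below, a minimal docstring written in this style might look like (a sketch
only; the section-by-section rules follow)::

    def add(a, b):
        """The sum of two numbers.

        Parameters
        ----------
        a : int
            First term.
        b : int
            Second term.

        Returns
        -------
        out : int
            The sum of `a` and `b`.

        Examples
        --------
        >>> add(1, 2)
        3

        """
        return a + b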
- -Status ------- -We are busy converting existing docstrings to the new format, -expanding them where they are lacking, as well as writing new ones for -undocumented functions. Volunteers are welcome to join the effort on -our new documentation system (see the `Developer Zone -`_). - -Sections --------- -The sections of the docstring are: - -1. **Short summary** - - A one-line summary that does not use variable names or the function - name, e.g. - - :: - - def add(a,b): - """The sum of two numbers. - - """ - - The function signature is normally found by introspection and - displayed by the help function. For some functions (notably those - written in C) the signature is not available, so we have to specify - it as the first line of the docstring:: - - """ - add(a,b) - - The sum of two numbers. - - """ - -2. **Extended summary** - - A few sentences giving an extended description. This section - should be used to clarify *functionality*, not to discuss - implementation detail or background theory, which should rather be - explored in the **notes** section below. You may refer to the - parameters and the function name, but parameter descriptions still - belong in the **parameters** section. - -3. **Parameters** - - Description of the function arguments, keywords and their - respective types. - - :: - - Parameters - ---------- - x : type - Description of parameter `x`. - - Enclose variables in single back-tics. If it is not necessary to - specify a keyword argument, use ``optional``:: - - x : int, optional - - Optional keyword parameters have default values, which are - displayed as part of the function signature. They can also be - detailed in the description:: - - Description of parameter `x` (the default is -1, which implies summation - over all axes). - - When a parameter can only assume one of a fixed set of values, - those values can be listed in braces :: - - x : {True, False} - Description of `x`. - -4. **Returns** - - Explanation of the returned values and their types, of the same - format as **parameters**. - -5. **Other parameters** - - An optional section used to describe infrequently used parameters. - It should only be used if a function has a large number of keyword - prameters, to prevent cluttering the **parameters** section. - -6. **Raises** - - An optional section detailing which errors get raised and under - what conditions:: - - Raises - ------ - LinAlgException - If the matrix is not numerically invertible. - -7. **See Also** - - An optional section used to refer to related code. This section - can be very useful, but should be used judiciously. The goal is to - direct users to other functions they may not be aware of, or have - easy means of discovering (by looking at the module docstring, for - example). Routines whose docstrings further explain parameters - used by this function are good candidates. - - As an example, for ``numpy.mean`` we would have:: - - See Also - -------- - average : Weighted average - - When referring to functions in the same sub-module, no prefix is - needed, and the tree is searched upwards for a match. - - Prefix functions from other sub-modules appropriately. E.g., - whilst documenting the ``random`` module, refer to a function in - ``fft`` by - - :: - - fft.fft2 : 2-D fast discrete Fourier transform - - When referring to an entirely different module:: - - scipy.random.norm : Random variates, PDFs, etc. - - Functions may be listed without descriptions:: - - See Also - -------- - func_a : Function a with its description. 
- func_b, func_c_, func_d - func_e - -8. **Notes** - - An optional section that provides additional information about the - code, possibly including a discussion of the algorithm. This - section may include mathematical equations, written in - `LaTeX `_ format:: - - The FFT is a fast implementation of the discrete Fourier transform: - - .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n} - - Equations can also be typeset underneath the math directive:: - - The discrete-time Fourier time-convolution property states that - - .. math:: - - x(n) * y(n) \Leftrightarrow X(e^{j\omega } )Y(e^{j\omega } )\\ - another equation here - - Math can furthermore be used inline, i.e. - - :: - - The value of :math:`\omega` is larger than 5. - - Variable names are displayed in typewriter font, obtained by using - ``\mathtt{var}``:: - - We square the input parameter `alpha` to obtain - :math:`\mathtt{alpha}^2`. - - Note that LaTeX is not particularly easy to read, so use equations - sparingly. - - Images are allowed, but should not be central to the explanation; - users viewing the docstring as text must be able to comprehend its - meaning without resorting to an image viewer. These additional - illustrations are included using:: - - .. image:: filename - - where filename is a path relative to the reference guide source - directory. - -9. **References** - - References cited in the **notes** section may be listed here, - e.g. if you cited the article below using the text ``[1]_``, - include it as in the list as follows:: - - .. [1] O. McNoleg, "The integration of GIS, remote sensing, - expert systems and adaptive co-kriging for environmental habitat - modelling of the Highland Haggis using object-oriented, fuzzy-logic - and neural-network techniques," Computers & Geosciences, vol. 22, - pp. 585-588, 1996. - - which renders as - - .. [1] O. McNoleg, "The integration of GIS, remote sensing, - expert systems and adaptive co-kriging for environmental habitat - modelling of the Highland Haggis using object-oriented, fuzzy-logic - and neural-network techniques," Computers & Geosciences, vol. 22, - pp. 585-588, 1996. - - Referencing sources of a temporary nature, like web pages, is - discouraged. References are meant to augment the docstring, but - should not be required to understand it. Follow the `citation - format of the IEEE - `_, which - states that references are numbered, starting from one, in the - order in which they are cited. - -10. **Examples** - - An optional section for examples, using the `doctest - `_ format. - This section is meant to illustrate usage, not to provide a - testing framework -- for that, use the ``tests/`` directory. - While optional, this section is very strongly encouraged. You can - run these examples by doing:: - - >>> import doctest - >>> doctest.testfile('example.py') - - or, using nose, - - :: - - $ nosetests --with-doctest example.py - - Blank lines are used to seperate doctests. When they occur in the - expected output, they should be replaced by ```` (see - `doctest options - `_ for other such - special strings), e.g. - - :: - - >>> print "a\n\nb" - a - - b - - The examples may assume that ``import numpy as np`` is executed before - the example code in *numpy*, and ``import scipy as sp`` in *scipy*. - Additional examples may make use of *matplotlib* for plotting, but should - import it explicitly, e.g., ``import matplotlib.pyplot as plt``. - -11. **Indexing tags*** - - Each function needs to be categorised for indexing purposes. Use - the ``index`` directive:: - - .. 
index:: - :refguide: ufunc, trigonometry - - To index a function as a sub-category of a class, separate index - entries by a colon, e.g. - - :: - - :refguide: ufunc, numpy:reshape, other - - A `list of available categories - `_ is - available. - -Documenting classes -------------------- - -Class docstring -``````````````` -Use the same sections as outlined above (all except ``Returns`` are -applicable). The constructor (``__init__``) should also be documented -here. - -An ``Attributes`` section may be used to describe class variables:: - - Attributes - ---------- - x : float - The X coordinate. - y : float - The Y coordinate. - -In general, it is not necessary to list class methods. Those that are -not part of the public API have names that start with an underscore. -In some cases, however, a class may have a great many methods, of -which only a few are relevant (e.g., subclasses of ndarray). Then, it -becomes useful to have an additional ``Methods`` section:: - - class Photo(ndarray): - """ - Array with associated photographic information. - - ... - - Attributes - ---------- - exposure : float - Exposure in seconds. - - Methods - ------- - colorspace(c='rgb') - Represent the photo in the given colorspace. - gamma(n=1.0) - Change the photo's gamma exposure. - - """ - -Note that `self` is *not* listed as the first parameter of methods. - -Method docstrings -````````````````` -Document these as you would any other function. Do not include -``self`` in the list of parameters. - -Common reST concepts --------------------- -For paragraphs, indentation is significant and indicates indentation in the -output. New paragraphs are marked with a blank line. - -Use *italics*, **bold**, and ``courier`` if needed in any explanations -(but not for variable names and doctest code or multi-line code). -Variable, module and class names should be written between single -backticks (```numpy```). - -A more extensive example of reST markup can be found in `this example -document `_; -the `quick reference -`_ is -useful while editing. - -Line spacing and indentation are significant and should be carefully -followed. - -Conclusion ----------- - -`An example -`_ of the -format shown here is available. Refer to `How to Build API/Reference -Documentation -`_ -on how to use epydoc_ or sphinx_ to construct a manual and web page. - -This document itself was written in ReStructuredText, and may be converted to -HTML using:: - - $ rst2html HOWTO_DOCUMENT.txt HOWTO_DOCUMENT.html - -.. _SciPy: http://www.scipy.org -.. _numpy-discussion list: http://www.scipy.org/Mailing_Lists -.. _epydoc: http://epydoc.sourceforge.net/ -.. _sphinx: http://sphinx.pocoo.org diff --git a/numpy/doc/README.txt b/numpy/doc/README.txt deleted file mode 100644 index eacc3659e..000000000 --- a/numpy/doc/README.txt +++ /dev/null @@ -1,15 +0,0 @@ -Very complete documentation is available from the primary developer of -NumPy for a small fee. After a brief period, that documentation -will become freely available. See http://www.trelgol.com for -details. The fee and restriction period is intended to allow people -and to encourage companies to easily contribute to the development of -NumPy. - -This directory will contain all public documentation that becomes available. - -Very good documentation is also available using Python's (and -especially IPython's) own help system. Most of the functions have -docstrings that provide usage assistance. 
-
-
-
diff --git a/numpy/doc/__init__.py b/numpy/doc/__init__.py
index 394f0b548..8664ea04d 100644
--- a/numpy/doc/__init__.py
+++ b/numpy/doc/__init__.py
@@ -1,2 +1,12 @@
-from numpy.doc.reference import *
-del reference
+import os
+
+ref_dir = os.path.join(os.path.dirname(__file__))
+
+__all__ = [f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and
+           not f.startswith('__')]
+__all__.sort()
+
+__doc__ = 'The following topics are available:\n' + \
+          '\n - '.join([''] + __all__)
+
+__all__.extend(['__doc__'])
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
new file mode 100644
index 000000000..dfb8fe74d
--- /dev/null
+++ b/numpy/doc/basics.py
@@ -0,0 +1,137 @@
+"""
+============
+Array basics
+============
+
+Array types and conversions between types
+=========================================
+
+Numpy supports a much greater variety of numerical types than Python does.
+This section shows which are available, and how to modify an array's data-type.
+
+==========  =========================================================
+Data type   Description
+==========  =========================================================
+bool        Boolean (True or False) stored as a byte
+int         Platform integer (normally either ``int32`` or ``int64``)
+int8        Byte (-128 to 127)
+int16       Integer (-32768 to 32767)
+int32       Integer (-2147483648 to 2147483647)
+int64       Integer (-9223372036854775808 to 9223372036854775807)
+uint8       Unsigned integer (0 to 255)
+uint16      Unsigned integer (0 to 65535)
+uint32      Unsigned integer (0 to 4294967295)
+uint64      Unsigned integer (0 to 18446744073709551615)
+float       Shorthand for ``float64``.
+float32     Single precision float: sign bit, 8 bits exponent,
+            23 bits mantissa
+float64     Double precision float: sign bit, 11 bits exponent,
+            52 bits mantissa
+complex     Shorthand for ``complex128``.
+complex64   Complex number, represented by two 32-bit floats (real
+            and imaginary components)
+complex128  Complex number, represented by two 64-bit floats (real
+            and imaginary components)
+==========  =========================================================
+
+Numpy numerical types are instances of ``dtype`` (data-type) objects, each
+having unique characteristics. Once you have imported NumPy using
+
+  ::
+
+    >>> import numpy as np
+
+the dtypes are available as ``np.bool``, ``np.float32``, etc.
+
+Advanced types, not listed in the table above, are explored in
+section `link_here`.
+
+There are 5 basic numerical types representing booleans (bool), integers (int),
+unsigned integers (uint), floating point (float) and complex. Those with numbers
+in their name indicate the bitsize of the type (i.e. how many bits are needed
+to represent a single value in memory). Some types, such as ``int`` and
+``intp``, have differing bitsizes, dependent on the platform (e.g. 32-bit
+vs. 64-bit machines). This should be taken into account when interfacing
+with low-level code (such as C or Fortran) where the raw memory is addressed.
+
+Data-types can be used as functions to convert python numbers to array scalars
+(see the array scalar section for an explanation), python sequences of numbers
+to arrays of that type, or as arguments to the dtype keyword that many numpy
+functions or methods accept. Some examples::
+
+    >>> import numpy as np
+    >>> x = np.float32(1.0)
+    >>> x
+    1.0
+    >>> y = np.int_([1,2,4])
+    >>> y
+    array([1, 2, 4])
+    >>> z = np.arange(3, dtype=np.uint8)
+    >>> z
+    array([0, 1, 2], dtype=uint8)
+
+Array types can also be referred to by character codes, mostly to retain
+backward compatibility with older packages such as Numeric. Some
+documentation may still refer to these, for example::
+
+    >>> np.array([1, 2, 3], dtype='f')
+    array([ 1., 2., 3.], dtype=float32)
+
+We recommend using dtype objects instead.
+
+To convert the type of an array, use the .astype() method (preferred) or
+the type itself as a function. For example: ::
+
+    >>> z.astype(float)
+    array([0., 1., 2.])
+    >>> np.int8(z)
+    array([0, 1, 2], dtype=int8)
+
+Note that, above, we use the *Python* float object as a dtype. NumPy knows
+that ``int`` refers to ``np.int``, ``bool`` means ``np.bool`` and
+that ``float`` is ``np.float``. The other data-types do not have Python
+equivalents.
+
+To determine the type of an array, look at the dtype attribute::
+
+    >>> z.dtype
+    dtype('uint8')
+
+dtype objects also contain information about the type, such as its bit-width
+and its byte-order. See xxx for details. The data type can also be used
+indirectly to query properties of the type, such as whether it is an integer::
+
+    >>> d = np.dtype(int)
+    >>> d
+    dtype('int32')
+
+    >>> np.issubdtype(d, int)
+    True
+
+    >>> np.issubdtype(d, float)
+    False
+
+
+Array Scalars
+=============
+
+Numpy generally returns elements of arrays as array scalars (a scalar
+with an associated dtype). Array scalars differ from Python scalars, but
+for the most part they can be used interchangeably (the primary
+exception is for versions of Python older than v2.x, where integer array
+scalars cannot act as indices for lists and tuples). There are some
+exceptions, such as when code requires very specific attributes of a scalar
+or when it checks specifically whether a value is a Python scalar. Generally,
+problems are easily fixed by explicitly converting array scalars
+to Python scalars, using the corresponding Python type function
+(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``).
+
+The primary advantage of using array scalars is that
+they preserve the array type (Python may not have a matching scalar type
+available, e.g. ``int16``). Therefore, the use of array scalars ensures
+identical behaviour between arrays and scalars, irrespective of whether the
+value is inside an array or not. NumPy scalars also have many of the same
+methods arrays do.
+
+See xxx for details.
+
+"""
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
new file mode 100644
index 000000000..95e9b67f9
--- /dev/null
+++ b/numpy/doc/broadcasting.py
@@ -0,0 +1,176 @@
+"""
+========================
+Broadcasting over arrays
+========================
+
+The term broadcasting describes how numpy treats arrays with different
+shapes during arithmetic operations. Subject to certain constraints,
+the smaller array is "broadcast" across the larger array so that they
+have compatible shapes. Broadcasting provides a means of vectorizing
+array operations so that looping occurs in C instead of Python. It does
+this without making needless copies of data and usually leads to
+efficient algorithm implementations. There are, however, cases where
+broadcasting is a bad idea because it leads to inefficient use of memory
+that slows computation.
+ +NumPy operations are usually done element-by-element, which requires two +arrays to have exactly the same shape:: + + >>> a = np.array([1.0, 2.0, 3.0]) + >>> b = np.array([2.0, 2.0, 2.0]) + >>> a * b + array([ 2., 4., 6.]) + +NumPy's broadcasting rule relaxes this constraint when the arrays' +shapes meet certain constraints. The simplest broadcasting example occurs +when an array and a scalar value are combined in an operation: + +>>> a = np.array([1.0, 2.0, 3.0]) +>>> b = 2.0 +>>> a * b +array([ 2., 4., 6.]) + +The result is equivalent to the previous example where ``b`` was an array. +We can think of the scalar ``b`` being *stretched* during the arithmetic +operation into an array with the same shape as ``a``. The new elements in +``b`` are simply copies of the original scalar. The stretching analogy is +only conceptual. NumPy is smart enough to use the original scalar value +without actually making copies, so that broadcasting operations are as +memory and computationally efficient as possible. + +The second example is more effective than the first, since here broadcasting +moves less memory around during the multiplication (``b`` is a scalar, +not an array). + +General Broadcasting Rules +========================== +When operating on two arrays, NumPy compares their shapes element-wise. +It starts with the trailing dimensions, and works its way forward. Two +dimensions are compatible when + +1) they are equal, or +2) one of them is 1 + +If these conditions are not met, a +``ValueError: frames are not aligned`` exception is thrown, indicating that +the arrays have incompatible shapes. The size of the resulting array +is the maximum size along each dimension of the input arrays. + +Arrays do not need to have the same *number* of dimensions. For example, +if you have a ``256x256x3`` array of RGB values, and you want to scale +each color in the image by a different value, you can multiply the image +by a one-dimensional array with 3 values. Lining up the sizes of the +trailing axes of these arrays according to the broadcast rules, shows that +they are compatible:: + + Image (3d array): 256 x 256 x 3 + Scale (1d array): 3 + Result (3d array): 256 x 256 x 3 + +When either of the dimensions compared is one, the larger of the two is +used. In other words, the smaller of two axes is stretched or "copied" +to match the other. 
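
The two rules above are all that is needed to work out the result shape of a
broadcast operation (or to decide that two shapes are incompatible). As a
minimal sketch -- not part of this changeset; the helper name
``broadcast_shape`` is made up for illustration, and NumPy applies the same
logic internally in C -- the rules can be written as::

    def broadcast_shape(shape_a, shape_b):
        # Shape that results from broadcasting shape_a against shape_b.
        # Pad the shorter shape with 1s on the left so both have the same
        # number of dimensions, then compare trailing dimensions first.
        ndim = max(len(shape_a), len(shape_b))
        a = (1,) * (ndim - len(shape_a)) + tuple(shape_a)
        b = (1,) * (ndim - len(shape_b)) + tuple(shape_b)
        result = []
        for da, db in zip(a, b):
            if da == db or da == 1 or db == 1:
                # Compatible: the result takes the larger of the two sizes.
                result.append(max(da, db))
            else:
                raise ValueError("shape mismatch: objects cannot be "
                                 "broadcast to a single shape")
        return tuple(result)

This sketch reproduces the worked examples that follow, e.g.
``broadcast_shape((8, 1, 6, 1), (7, 1, 5)) == (8, 7, 6, 5)`` and
``broadcast_shape((256, 256, 3), (3,)) == (256, 256, 3)``, while
``broadcast_shape((3,), (4,))`` raises the ``ValueError`` shown in the
practice example below.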
+
+In the following example, both the ``A`` and ``B`` arrays have axes with
+length one that are expanded to a larger size during the broadcast
+operation::
+
+    A      (4d array):  8 x 1 x 6 x 1
+    B      (3d array):      7 x 1 x 5
+    Result (4d array):  8 x 7 x 6 x 5
+
+Here are some more examples::
+
+    A      (2d array):  5 x 4
+    B      (1d array):      1
+    Result (2d array):  5 x 4
+
+    A      (2d array):  5 x 4
+    B      (1d array):      4
+    Result (2d array):  5 x 4
+
+    A      (3d array):  15 x 3 x 5
+    B      (3d array):  15 x 1 x 5
+    Result (3d array):  15 x 3 x 5
+
+    A      (3d array):  15 x 3 x 5
+    B      (2d array):       3 x 5
+    Result (3d array):  15 x 3 x 5
+
+    A      (3d array):  15 x 3 x 5
+    B      (2d array):       3 x 1
+    Result (3d array):  15 x 3 x 5
+
+Here are examples of shapes that do not broadcast::
+
+    A      (1d array):  3
+    B      (1d array):  4            # trailing dimensions do not match
+
+    A      (2d array):      2 x 1
+    B      (3d array):  8 x 4 x 3    # second from last dimensions mismatch
+
+An example of broadcasting in practice::
+
+    >>> x = np.arange(4)
+    >>> xx = x.reshape(4,1)
+    >>> y = np.ones(5)
+    >>> z = np.ones((3,4))
+
+    >>> x.shape
+    (4,)
+
+    >>> y.shape
+    (5,)
+
+    >>> x + y
+    ValueError: shape mismatch: objects cannot be broadcast to a single shape
+
+    >>> xx.shape
+    (4, 1)
+
+    >>> y.shape
+    (5,)
+
+    >>> (xx + y).shape
+    (4, 5)
+
+    >>> xx + y
+    array([[ 1.,  1.,  1.,  1.,  1.],
+           [ 2.,  2.,  2.,  2.,  2.],
+           [ 3.,  3.,  3.,  3.,  3.],
+           [ 4.,  4.,  4.,  4.,  4.]])
+
+    >>> x.shape
+    (4,)
+
+    >>> z.shape
+    (3, 4)
+
+    >>> (x + z).shape
+    (3, 4)
+
+    >>> x + z
+    array([[ 1.,  2.,  3.,  4.],
+           [ 1.,  2.,  3.,  4.],
+           [ 1.,  2.,  3.,  4.]])
+
+Broadcasting provides a convenient way of taking the outer product (or
+any other outer operation) of two arrays. The following example shows an
+outer addition operation of two 1-d arrays::
+
+    >>> a = np.array([0.0, 10.0, 20.0, 30.0])
+    >>> b = np.array([1.0, 2.0, 3.0])
+    >>> a[:, np.newaxis] + b
+    array([[  1.,   2.,   3.],
+           [ 11.,  12.,  13.],
+           [ 21.,  22.,  23.],
+           [ 31.,  32.,  33.]])
+
+Here the ``newaxis`` index operator inserts a new axis into ``a``,
+making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
+with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
+
+See `this article `_
+for illustrations of broadcasting concepts.
+
+"""
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
new file mode 100644
index 000000000..1e80e5115
--- /dev/null
+++ b/numpy/doc/creation.py
@@ -0,0 +1,132 @@
+"""
+==============
+Array creation
+==============
+
+Introduction
+============
+
+There are 5 general mechanisms for creating arrays:
+
+1) Conversion from other Python structures (e.g., lists, tuples)
+2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros, etc.)
+3) Reading arrays from disk, either from standard or custom formats
+4) Creating arrays from raw bytes through the use of strings or buffers
+5) Use of special library functions (e.g., random)
+
+This section will not cover means of replicating, joining, or otherwise
+expanding or mutating existing arrays. Nor will it cover creating object
+arrays or record arrays. Both of those are covered in their own sections.
+
+Converting Python array-like objects to numpy arrays
+=====================================================
+
+In general, numerical data arranged in an array-like structure in Python can
+be converted to arrays through the use of the array() function. The most
+obvious examples are lists and tuples. See the documentation for array() for
+details on its use. Some objects may support the array-protocol and allow
+conversion to arrays this way.
A simple way to find out if the object can be converted to a numpy array +using array() is simply to try it interactively and see if it works! (The +Python Way). + +Examples: :: + + >>> x = np.array([2,3,1,0]) + >>> x = np.array([2, 3, 1, 0]) + >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types + >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) + +Intrinsic numpy array creation +============================== + +Numpy has built-in functions for creating arrays from scratch: + +zeros(shape) will create an array filled with 0 values with the specified +shape. The default dtype is float64. + +``>>> np.zeros((2, 3)) +array([[ 0., 0., 0.], [ 0., 0., 0.]])`` + +ones(shape) will create an array filled with 1 values. It is identical to +zeros in all other respects. + +arange() will create arrays with regularly incrementing values. Check the +docstring for complete information on the various ways it can be used. A few +examples will be given here: :: + + >>> np.arange(10) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.arange(2, 10, dtype=np.float) + array([ 2., 3., 4., 5., 6., 7., 8., 9.]) + >>> np.arange(2, 3, 0.1) + array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) + +Note that there are some subtleties regarding the last usage that the user +should be aware of that are described in the arange docstring. + +indices() will create a set of arrays (stacked as a one-higher dimensioned +array), one per dimension with each representing variation in that dimension. +An examples illustrates much better than a verbal description: :: + + >>> np.indices((3,3)) + array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) + +This is particularly useful for evaluating functions of multiple dimensions on +a regular grid. + +Reading arrays from disk +======================== + +This is presumably the most common case of large array creation. The details, +of course, depend greatly on the format of data on disk and so this section +can only give general pointers on how to handle various formats. + +Standard binary formats +----------------------- + +Various fields have standard formats for array data. The following lists the +ones with known python libraries to read them and return numpy arrays (there +may be others for which it is possible to read and convert to numpy arrays so +check the last section as well) + +HDF5: PyTables +FITS: PyFITS +Others? xxx + +Examples of formats that cannot be read directly but for which it is not hard +to convert are libraries like PIL (able to read and write many image formats +such as jpg, png, etc). + +Common ascii formats +-------------------- + +Comma Separated Value files (CSV) are widely used (and an export and import +option for programs like Excel). There are a number of ways of reading these +files in Python. The most convenient ways of reading these are found in pylab +(part of matplotlib) in the xxx function. (list alternatives xxx) + +More generic ascii files can be read using the io package in scipy. xxx a few +more details needed... + +Custom binary formats +--------------------- + +There are a variety of approaches one can use. If the file has a relatively +simple format then one can write a simple I/O library and use the numpy +fromfile() function and .tofile() method to read and write numpy arrays +directly (mind your byteorder though!) 
If a good C or C++ library exists that +read the data, one can wrap that library with a variety of techniques (see +xxx) though that certainly is much more work and requires significantly more +advanced knowledge to interface with C or C++. + +Use of special libraries +------------------------ + +There are libraries that can be used to generate arrays for special purposes +and it isn't possible to enumerate all of them. The most common uses are use +of the many array generation functions in random that can generate arrays of +random values, and some utility functions to generate special matrices (e.g. +diagonal, see xxx) + +""" diff --git a/numpy/doc/cython/MANIFEST b/numpy/doc/cython/MANIFEST deleted file mode 100644 index feb3ec22a..000000000 --- a/numpy/doc/cython/MANIFEST +++ /dev/null @@ -1,2 +0,0 @@ -numpyx.pyx -setup.py diff --git a/numpy/doc/cython/Makefile b/numpy/doc/cython/Makefile deleted file mode 100644 index 7c9c72981..000000000 --- a/numpy/doc/cython/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Simple makefile to quickly access handy build commands for Cython extension -# code generation. Note that the actual code to produce the extension lives in -# the setup.py file, this Makefile is just meant as a command -# convenience/reminder while doing development. - -help: - @echo "Numpy/Cython tasks. Available tasks:" - @echo "ext -> build the Cython extension module." - @echo "html -> create annotated HTML from the .pyx sources" - @echo "test -> run a simple test demo." - @echo "all -> Call ext, html and finally test." - -all: ext html test - -ext: numpyx.so - -test: ext - python run_test.py - -html: numpyx.pyx.html - -numpyx.so: numpyx.pyx numpyx.c - python setup.py build_ext --inplace - -numpyx.pyx.html: numpyx.pyx - cython -a numpyx.pyx - @echo "Annotated HTML of the C code generated in numpyx.html" - -# Phony targets for cleanup and similar uses - -.PHONY: clean -clean: - rm -rf *~ *.so *.c *.o *.html build - -# Suffix rules -%.c : %.pyx - cython $< diff --git a/numpy/doc/cython/README.txt b/numpy/doc/cython/README.txt deleted file mode 100644 index ff0abb0fe..000000000 --- a/numpy/doc/cython/README.txt +++ /dev/null @@ -1,20 +0,0 @@ -================== - NumPy and Cython -================== - -This directory contains a small example of how to use NumPy and Cython -together. While much work is planned for the Summer of 2008 as part of the -Google Summer of Code project to improve integration between the two, even -today Cython can be used effectively to write optimized code that accesses -NumPy arrays. - -The example provided is just a stub showing how to build an extension and -access the array objects; improvements to this to show more sophisticated tasks -are welcome. - -To run it locally, simply type:: - - make help - -which shows you the currently available targets (these are just handy -shorthands for common commands). \ No newline at end of file diff --git a/numpy/doc/cython/c_numpy.pxd b/numpy/doc/cython/c_numpy.pxd deleted file mode 100644 index 4a0bd1c01..000000000 --- a/numpy/doc/cython/c_numpy.pxd +++ /dev/null @@ -1,136 +0,0 @@ -# :Author: Travis Oliphant - -# API declaration section. This basically exposes the NumPy C API to -# Pyrex/Cython programs. 
- -cdef extern from "numpy/arrayobject.h": - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - cdef enum requirements: - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum defines: - NPY_MAXDIMS - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef int npy_intp - - ctypedef extern class numpy.dtype [object PyArray_Descr]: - cdef int type_num, elsize, alignment - cdef char type, kind, byteorder, hasobject - cdef object fields, typeobj - - ctypedef extern class numpy.ndarray [object PyArrayObject]: - cdef char *data - cdef int nd - cdef npy_intp *dimensions - cdef npy_intp *strides - cdef object base - cdef dtype descr - cdef int flags - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - cdef int nd_m1 - cdef npy_intp index, size - cdef ndarray ao - cdef char *dataptr - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - cdef npy_intp *dimensions - cdef void **iters - - object PyArray_ZEROS(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - object PyArray_EMPTY(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - dtype PyArray_DescrFromTypeNum(NPY_TYPES type_num) - object PyArray_SimpleNew(int ndims, npy_intp* dims, NPY_TYPES type_num) - int PyArray_Check(object obj) - object PyArray_ContiguousFromAny(object obj, NPY_TYPES type, - int mindim, int maxdim) - object PyArray_ContiguousFromObject(object obj, NPY_TYPES type, - int mindim, int maxdim) - npy_intp PyArray_SIZE(ndarray arr) - npy_intp PyArray_NBYTES(ndarray arr) - void *PyArray_DATA(ndarray arr) - object PyArray_FromAny(object obj, dtype newtype, int mindim, int maxdim, - int requirements, object context) - object PyArray_FROMANY(object obj, NPY_TYPES type_num, int min, - int max, int requirements) - object PyArray_NewFromDescr(object subtype, dtype newtype, int nd, - npy_intp* dims, npy_intp* strides, void* data, - int flags, object parent) - - object PyArray_FROM_OTF(object obj, NPY_TYPES type, int flags) - object PyArray_EnsureArray(object) - - object PyArray_MultiIterNew(int n, ...) - - char *PyArray_MultiIter_DATA(broadcast multi, int i) - void PyArray_MultiIter_NEXTi(broadcast multi, int i) - void PyArray_MultiIter_NEXT(broadcast multi) - - object PyArray_IterNew(object arr) - void PyArray_ITER_NEXT(flatiter it) - - void import_array() diff --git a/numpy/doc/cython/c_python.pxd b/numpy/doc/cython/c_python.pxd deleted file mode 100644 index 46d2fd1a7..000000000 --- a/numpy/doc/cython/c_python.pxd +++ /dev/null @@ -1,62 +0,0 @@ -# :Author: Robert Kern -# :Copyright: 2004, Enthought, Inc. -# :License: BSD Style - - -cdef extern from "Python.h": - # Not part of the Python API, but we might as well define it here. - # Note that the exact type doesn't actually matter for Pyrex. 
- ctypedef int size_t - - # Some type declarations we need - ctypedef int Py_intptr_t - - - # String API - char* PyString_AsString(object string) - char* PyString_AS_STRING(object string) - object PyString_FromString(char* c_string) - object PyString_FromStringAndSize(char* c_string, int length) - object PyString_InternFromString(char *v) - - # Float API - object PyFloat_FromDouble(double v) - double PyFloat_AsDouble(object ob) - long PyInt_AsLong(object ob) - - - # Memory API - void* PyMem_Malloc(size_t n) - void* PyMem_Realloc(void* buf, size_t n) - void PyMem_Free(void* buf) - - void Py_DECREF(object obj) - void Py_XDECREF(object obj) - void Py_INCREF(object obj) - void Py_XINCREF(object obj) - - # CObject API - ctypedef void (*destructor1)(void* cobj) - ctypedef void (*destructor2)(void* cobj, void* desc) - int PyCObject_Check(object p) - object PyCObject_FromVoidPtr(void* cobj, destructor1 destr) - object PyCObject_FromVoidPtrAndDesc(void* cobj, void* desc, - destructor2 destr) - void* PyCObject_AsVoidPtr(object self) - void* PyCObject_GetDesc(object self) - int PyCObject_SetVoidPtr(object self, void* cobj) - - # TypeCheck API - int PyFloat_Check(object obj) - int PyInt_Check(object obj) - - # Error API - int PyErr_Occurred() - void PyErr_Clear() - int PyErr_CheckSignals() - -cdef extern from "string.h": - void *memcpy(void *s1, void *s2, int n) - -cdef extern from "math.h": - double fabs(double x) diff --git a/numpy/doc/cython/numpyx.pyx b/numpy/doc/cython/numpyx.pyx deleted file mode 100644 index cbc786ef0..000000000 --- a/numpy/doc/cython/numpyx.pyx +++ /dev/null @@ -1,127 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough -"""Cython access to Numpy arrays - simple example. -""" - -############################################################################# -# Load C APIs declared in .pxd files via cimport -# -# A 'cimport' is similar to a Python 'import' statement, but it provides access -# to the C part of a library instead of its Python-visible API. See the -# Pyrex/Cython documentation for details. - -cimport c_python as py - -cimport c_numpy as cnp - -# NOTE: numpy MUST be initialized before any other code is executed. -cnp.import_array() - -############################################################################# -# Load Python modules via normal import statements - -import numpy as np - -############################################################################# -# Regular code section begins - -# A 'def' function is visible in the Python-imported module -def print_array_info(cnp.ndarray arr): - """Simple information printer about an array. - - Code meant to illustrate Cython/NumPy integration only.""" - - cdef int i - - print '-='*10 - # Note: the double cast here (void * first, then py.Py_intptr_t) is needed - # in Cython but not in Pyrex, since the casting behavior of cython is - # slightly different (and generally safer) than that of Pyrex. 
In this - # case, we just want the memory address of the actual Array object, so we - # cast it to void before doing the py.Py_intptr_t cast: - print 'Printing array info for ndarray at 0x%0lx'% \ - (arr,) - print 'number of dimensions:',arr.nd - print 'address of strides: 0x%0lx'%(arr.strides,) - print 'strides:' - for i from 0<=iarr.strides[i] - print 'memory dump:' - print_elements( arr.data, arr.strides, arr.dimensions, - arr.nd, sizeof(double), arr.dtype ) - print '-='*10 - print - -# A 'cdef' function is NOT visible to the python side, but it is accessible to -# the rest of this Cython module -cdef print_elements(char *data, - py.Py_intptr_t* strides, - py.Py_intptr_t* dimensions, - int nd, - int elsize, - object dtype): - cdef py.Py_intptr_t i,j - cdef void* elptr - - if dtype not in [np.dtype(np.object_), - np.dtype(np.float64)]: - print ' print_elements() not (yet) implemented for dtype %s'%dtype.name - return - - if nd ==0: - if dtype==np.dtype(np.object_): - elptr = (data)[0] #[0] dereferences pointer in Pyrex - print ' ',elptr - elif dtype==np.dtype(np.float64): - print ' ',(data)[0] - elif nd == 1: - for i from 0<=idata)[0] - print ' ',elptr - elif dtype==np.dtype(np.float64): - print ' ',(data)[0] - data = data + strides[0] - else: - for i from 0<=i>> a=[1,2,3] - >>> print [x + 3 for x in a] - [4, 5, 6] - >>> print "a\n\nb" - a - - b - - """ - - pass diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py new file mode 100644 index 000000000..6a182adf4 --- /dev/null +++ b/numpy/doc/glossary.py @@ -0,0 +1,367 @@ +""" +================= +Glossary +================= + +along an axis + Axes are defined for arrays with more than one dimension. A + 2-dimensional array has two corresponding axes: the first running + vertically downwards across rows (axis 0), and the second running + horizontally across columns (axis 1). + + Many operation can take place along one of these axes. For example, + we can sum each row of an array, in which case we operate along + columns, or axis 1:: + + >>> x = np.arange(12).reshape((3,4)) + + >>> x + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + >>> x.sum(axis=1) + array([ 6, 22, 38]) + +array or ndarray + A homogeneous container of numerical elements. Each element in the + array occupies a fixed amount of memory (hence homogeneous), and + can be a numerical element of a single type (such as float, int + or complex) or a combination (such as ``(float, int, float)``). Each + array has an associated data-type (or ``dtype``), which describes + the numerical type of its elements:: + + >>> x = np.array([1, 2, 3], float) + + >>> x + array([ 1., 2., 3.]) + + >>> x.dtype # floating point number, 64 bits of memory per element + dtype('float64') + + + # More complicated data type: each array element is a combination of + # and integer and a floating point number + >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) + array([(1, 2.0), (3, 4.0)], + dtype=[('x', '>> x = np.array([1, 2, 3]) + >>> x.shape + (3,) + +broadcast + NumPy can do operations on arrays whose shapes are mismatched:: + + >>> x = np.array([1, 2]) + >>> y = np.array([[3], [4]]) + + >>> x + array([1, 2]) + + >>> y + array([[3], + [4]]) + + >>> x + y + array([[4, 5], + [5, 6]]) + + See `doc.broadcasting`_ for more information. + +decorator + An operator that transforms a function. For example, a ``log`` + decorator may be defined to print debugging information upon + function execution:: + + >>> def log(f): + ... def new_logging_func(*args, **kwargs): + ... 
print "Logging call with parameters:", args, kwargs + ... return f(*args, **kwargs) + ... + ... return new_logging_func + + Now, when we define a function, we can "decorate" it using ``log``:: + + >>> @log + ... def add(a, b): + ... return a + b + + Calling ``add`` then yields: + + >>> add(1, 2) + Logging call with parameters: (1, 2) {} + 3 + +dictionary + Resembling a language dictionary, which provides a mapping between + words and descriptions thereof, a Python dictionary is a mapping + between two objects:: + + >>> x = {1: 'one', 'two': [1, 2]} + + Here, `x` is a dictionary mapping keys to values, in this case + the integer 1 to the string "one", and the string "two" to + the list ``[1, 2]``. The values may be accessed using their + corresponding keys:: + + >>> x[1] + 'one' + + >>> x['two'] + [1, 2] + + Note that dictionaries are not stored in any specific order. Also, + most mutable (see *immutable* below) objects, such as lists, may not + be used as keys. + + For more information on dictionaries, read the + `Python tutorial `_. + +immutable + An object that cannot be modified after execution is called + immutable. Two common examples are strings and tuples. + +instance + A class definition gives the blueprint for constructing an object:: + + >>> class House(object): + ... wall_colour = 'white' + + Yet, we have to *build* a house before it exists:: + + >>> h = House() # build a house + + Now, ``h`` is called a ``House`` instance. An instance is therefore + a specific realisation of a class. + +iterable + A sequence that allows "walking" (iterating) over items, typically + using a loop such as:: + + >>> x = [1, 2, 3] + >>> [item**2 for item in x] + [1, 4, 9] + + It is often used in combintion with ``enumerate``:: + + >>> for n, k in enumerate(keys): + ... print "Key %d: %s" % (n, k) + ... + Key 0: a + Key 1: b + Key 2: c + +list + A Python container that can hold any number of objects or items. + The items do not have to be of the same type, and can even be + lists themselves:: + + >>> x = [2, 2.0, "two", [2, 2.0]] + + The list `x` contains 4 items, each which can be accessed individually:: + + >>> x[2] # the string 'two' + 'two' + + >>> x[3] # a list, containing an integer 2 and a float 2.0 + [2, 2.0] + + It is also possible to select more than one item at a time, + using *slicing*:: + + >>> x[0:2] # or, equivalently, x[:2] + [2, 2.0] + + In code, arrays are often conveniently expressed as nested lists:: + + + >>> np.array([[1, 2], [3, 4]]) + array([[1, 2], + [3, 4]]) + + For more information, read the section on lists in the `Python + tutorial `_. For a mapping + type (key-value), see *dictionary*. + +mask + A boolean array, used to select only certain elements for an operation:: + + >>> x = np.arange(5) + >>> x + array([0, 1, 2, 3, 4]) + + >>> mask = (x > 2) + >>> mask + array([False, False, False, True, True], dtype=bool) + + >>> x[mask] = -1 + >>> x + array([ 0, 1, 2, -1, -1]) + +masked array + Array that suppressed values indicated by a mask:: + + >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) + >>> x + masked_array(data = [-- 2.0 --], + mask = [ True False True], + fill_value=1e+20) + + >>> x + [1, 2, 3] + masked_array(data = [-- 4.0 --], + mask = [ True False True], + fill_value=1e+20) + + Masked arrays are often used when operating on arrays containing + missing or invalid entries. + +matrix + A 2-dimensional ndarray that preserves its two-dimensional nature + throughout operations. 
It has certain special operations, such as ``*`` + (matrix multiplication) and ``**`` (matrix power), defined:: + + >>> x = np.mat([[1, 2], [3, 4]]) + + >>> x + matrix([[1, 2], + [3, 4]]) + + >>> x**2 + matrix([[ 7, 10], + [15, 22]]) + +method + A function associated with an object. For example, each ndarray has a + method called ``repeat``:: + + >>> x = np.array([1, 2, 3]) + + >>> x.repeat(2) + array([1, 1, 2, 2, 3, 3]) + +reference + If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, + ``a`` and ``b`` are different names for the same Python object. + +self + Often seen in method signatures, ``self`` refers to the instance + of the associated class. For example: + + >>> class Paintbrush(object): + ... color = 'blue' + ... + ... def paint(self): + ... print "Painting the city %s!" % self.color + ... + >>> p = Paintbrush() + >>> p.color = 'red' + >>> p.paint() # self refers to 'p' + Painting the city red! + +slice + Used to select only certain elements from a sequence:: + + >>> x = range(5) + >>> x + [0, 1, 2, 3, 4] + + >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) + [1, 2] + + >>> x[1:5:2] # slice from 1 to 5, but skipping every second element + [1, 3] + + >>> x[::-1] # slice a sequence in reverse + [4, 3, 2, 1, 0] + + Arrays may have more than one dimension, each which can be sliced + individually:: + + >>> x = np.array([[1, 2], [3, 4]]) + >>> x + array([[1, 2], + [3, 4]]) + + >>> x[:, 1] + array([2, 4]) + +tuple + A sequence that may contain a variable number of types of any + kind. A tuple is immutable, i.e., once constructed it cannot be + changed. Similar to a list, it can be indexed and sliced:: + + >>> x = (1, 'one', [1, 2]) + + >>> x + (1, 'one', [1, 2]) + + >>> x[0] + 1 + + >>> x[:2] + (1, 'one') + + A useful concept is "tuple unpacking", which allows variables to + be assigned to the contents of a tuple:: + + >>> x, y = (1, 2) + >>> x, y = 1, 2 + + This is often used when a function returns multiple values: + + >>> def return_many(): + ... return 1, 'alpha' + + >>> a, b, c = return_many() + >>> a, b, c + (1, 'alpha', None) + + >>> a + 1 + >>> b + 'alpha' + +ufunc + Universal function. A fast element-wise array operation. Examples include + ``add``, ``sin`` and ``logical_or``. + +view + An array that does not own its data, but refers to another array's + data instead. For example, we may create a view that only shows + every second element of another array:: + + >>> x = np.arange(5) + >>> x + array([0, 1, 2, 3, 4]) + + >>> y = x[::2] + >>> y + array([0, 2, 4]) + + >>> x[0] = 3 # changing x changes y as well, since y is a view on x + >>> y + array([3, 2, 4]) + +wrapper + Python is a high-level (highly abstracted, or English-like) language. + This abstraction comes at a price in execution speed, and sometimes + it becomes necessary to use lower level languages to do fast + computations. A wrapper is code that provides a bridge between + high and the low level languages, allowing, e.g., Python to execute + code written in C or Fortran. + + Examples include ctypes, SWIG and Cython (which wraps C and C++) + and f2py (which wraps Fortran). + +""" diff --git a/numpy/doc/howtofind.py b/numpy/doc/howtofind.py new file mode 100644 index 000000000..29ad05318 --- /dev/null +++ b/numpy/doc/howtofind.py @@ -0,0 +1,9 @@ +""" + +================= +How to Find Stuff +================= + +How to find things in NumPy. 
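
A hedged sketch of the kind of workflow this stub will eventually describe
(assuming a NumPy version that provides ``np.lookfor``; output is elided)::

    >>> import numpy as np
    >>> np.lookfor('fft')          # keyword search through docstrings
    Search results for 'fft'
    ...
    >>> np.info(np.fft.fft)        # print a single object's documentation
    ...
    >>> import numpy.doc
    >>> help(numpy.doc)            # list the topic guides added in this patch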
+ +""" diff --git a/numpy/doc/html/api-objects.txt b/numpy/doc/html/api-objects.txt deleted file mode 100644 index 81953990e..000000000 --- a/numpy/doc/html/api-objects.txt +++ /dev/null @@ -1,4 +0,0 @@ -example example-module.html -example.otherfunc example-module.html#otherfunc -example.foo example-module.html#foo -example.newfunc example-module.html#newfunc diff --git a/numpy/doc/html/crarr.png b/numpy/doc/html/crarr.png deleted file mode 100644 index 26b43c524..000000000 Binary files a/numpy/doc/html/crarr.png and /dev/null differ diff --git a/numpy/doc/html/epydoc.css b/numpy/doc/html/epydoc.css deleted file mode 100644 index 86d417068..000000000 --- a/numpy/doc/html/epydoc.css +++ /dev/null @@ -1,322 +0,0 @@ - - -/* Epydoc CSS Stylesheet - * - * This stylesheet can be used to customize the appearance of epydoc's - * HTML output. - * - */ - -/* Default Colors & Styles - * - Set the default foreground & background color with 'body'; and - * link colors with 'a:link' and 'a:visited'. - * - Use bold for decision list terms. - * - The heading styles defined here are used for headings *within* - * docstring descriptions. All headings used by epydoc itself use - * either class='epydoc' or class='toc' (CSS styles for both - * defined below). - */ -body { background: #ffffff; color: #000000; } -p { margin-top: 0.5em; margin-bottom: 0.5em; } -a:link { color: #0000ff; } -a:visited { color: #204080; } -dt { font-weight: bold; } -h1 { font-size: +140%; font-style: italic; - font-weight: bold; } -h2 { font-size: +125%; font-style: italic; - font-weight: bold; } -h3 { font-size: +110%; font-style: italic; - font-weight: normal; } -code { font-size: 100%; } -/* N.B.: class, not pseudoclass */ -a.link { font-family: monospace; } - -/* Page Header & Footer - * - The standard page header consists of a navigation bar (with - * pointers to standard pages such as 'home' and 'trees'); a - * breadcrumbs list, which can be used to navigate to containing - * classes or modules; options links, to show/hide private - * variables and to show/hide frames; and a page title (using - *

). The page title may be followed by a link to the - * corresponding source code (using 'span.codelink'). - * - The footer consists of a navigation bar, a timestamp, and a - * pointer to epydoc's homepage. - */ -h1.epydoc { margin: 0; font-size: +140%; font-weight: bold; } -h2.epydoc { font-size: +130%; font-weight: bold; } -h3.epydoc { font-size: +115%; font-weight: bold; - margin-top: 0.2em; } -td h3.epydoc { font-size: +115%; font-weight: bold; - margin-bottom: 0; } -table.navbar { background: #a0c0ff; color: #000000; - border: 2px groove #c0d0d0; } -table.navbar table { color: #000000; } -th.navbar-select { background: #70b0ff; - color: #000000; } -table.navbar a { text-decoration: none; } -table.navbar a:link { color: #0000ff; } -table.navbar a:visited { color: #204080; } -span.breadcrumbs { font-size: 85%; font-weight: bold; } -span.options { font-size: 70%; } -span.codelink { font-size: 85%; } -td.footer { font-size: 85%; } - -/* Table Headers - * - Each summary table and details section begins with a 'header' - * row. This row contains a section title (marked by - * 'span.table-header') as well as a show/hide private link - * (marked by 'span.options', defined above). - * - Summary tables that contain user-defined groups mark those - * groups using 'group header' rows. - */ -td.table-header { background: #70b0ff; color: #000000; - border: 1px solid #608090; } -td.table-header table { color: #000000; } -td.table-header table a:link { color: #0000ff; } -td.table-header table a:visited { color: #204080; } -span.table-header { font-size: 120%; font-weight: bold; } -th.group-header { background: #c0e0f8; color: #000000; - text-align: left; font-style: italic; - font-size: 115%; - border: 1px solid #608090; } - -/* Summary Tables (functions, variables, etc) - * - Each object is described by a single row of the table with - * two cells. The left cell gives the object's type, and is - * marked with 'code.summary-type'. The right cell gives the - * object's name and a summary description. - * - CSS styles for the table's header and group headers are - * defined above, under 'Table Headers' - */ -table.summary { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; - margin-bottom: 0.5em; } -td.summary { border: 1px solid #608090; } -code.summary-type { font-size: 85%; } -table.summary a:link { color: #0000ff; } -table.summary a:visited { color: #204080; } - - -/* Details Tables (functions, variables, etc) - * - Each object is described in its own div. - * - A single-row summary table w/ table-header is used as - * a header for each details section (CSS style for table-header - * is defined above, under 'Table Headers'). - */ -table.details { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; - margin: .2em 0 0 0; } -table.details table { color: #000000; } -table.details a:link { color: #0000ff; } -table.details a:visited { color: #204080; } - -/* Fields */ -dl.fields { margin-left: 2em; margin-top: 1em; - margin-bottom: 1em; } -dl.fields dd ul { margin-left: 0em; padding-left: 0em; } -dl.fields dd ul li ul { margin-left: 2em; padding-left: 0em; } -div.fields { margin-left: 2em; } -div.fields p { margin-bottom: 0.5em; } - -/* Index tables (identifier index, term index, etc) - * - link-index is used for indices containing lists of links - * (namely, the identifier index & term index). - * - index-where is used in link indices for the text indicating - * the container/source for each link. 
- * - metadata-index is used for indices containing metadata - * extracted from fields (namely, the bug index & todo index). - */ -table.link-index { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; } -td.link-index { border-width: 0px; } -table.link-index a:link { color: #0000ff; } -table.link-index a:visited { color: #204080; } -span.index-where { font-size: 70%; } -table.metadata-index { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; - margin: .2em 0 0 0; } -td.metadata-index { border-width: 1px; border-style: solid; } -table.metadata-index a:link { color: #0000ff; } -table.metadata-index a:visited { color: #204080; } - -/* Function signatures - * - sig* is used for the signature in the details section. - * - .summary-sig* is used for the signature in the summary - * table, and when listing property accessor functions. - * */ -.sig-name { color: #006080; } -.sig-arg { color: #008060; } -.sig-default { color: #602000; } -.summary-sig { font-family: monospace; } -.summary-sig-name { color: #006080; font-weight: bold; } -table.summary a.summary-sig-name:link - { color: #006080; font-weight: bold; } -table.summary a.summary-sig-name:visited - { color: #006080; font-weight: bold; } -.summary-sig-arg { color: #006040; } -.summary-sig-default { color: #501800; } - -/* Subclass list - */ -ul.subclass-list { display: inline; } -ul.subclass-list li { display: inline; } - -/* To render variables, classes etc. like functions */ -table.summary .summary-name { color: #006080; font-weight: bold; - font-family: monospace; } -table.summary - a.summary-name:link { color: #006080; font-weight: bold; - font-family: monospace; } -table.summary - a.summary-name:visited { color: #006080; font-weight: bold; - font-family: monospace; } - -/* Variable values - * - In the 'variable details' sections, each varaible's value is - * listed in a 'pre.variable' box. The width of this box is - * restricted to 80 chars; if the value's repr is longer than - * this it will be wrapped, using a backslash marked with - * class 'variable-linewrap'. If the value's repr is longer - * than 3 lines, the rest will be ellided; and an ellipsis - * marker ('...' marked with 'variable-ellipsis') will be used. - * - If the value is a string, its quote marks will be marked - * with 'variable-quote'. - * - If the variable is a regexp, it is syntax-highlighted using - * the re* CSS classes. - */ -pre.variable { padding: .5em; margin: 0; - background: #dce4ec; color: #000000; - border: 1px solid #708890; } -.variable-linewrap { color: #604000; font-weight: bold; } -.variable-ellipsis { color: #604000; font-weight: bold; } -.variable-quote { color: #604000; font-weight: bold; } -.variable-group { color: #008000; font-weight: bold; } -.variable-op { color: #604000; font-weight: bold; } -.variable-string { color: #006030; } -.variable-unknown { color: #a00000; font-weight: bold; } -.re { color: #000000; } -.re-char { color: #006030; } -.re-op { color: #600000; } -.re-group { color: #003060; } -.re-ref { color: #404040; } - -/* Base tree - * - Used by class pages to display the base class hierarchy. - */ -pre.base-tree { font-size: 80%; margin: 0; } - -/* Frames-based table of contents headers - * - Consists of two frames: one for selecting modules; and - * the other listing the contents of the selected module. - * - h1.toc is used for each frame's heading - * - h2.toc is used for subheadings within each frame. 
- */ -h1.toc { text-align: center; font-size: 105%; - margin: 0; font-weight: bold; - padding: 0; } -h2.toc { font-size: 100%; font-weight: bold; - margin: 0.5em 0 0 -0.3em; } - -/* Syntax Highlighting for Source Code - * - doctest examples are displayed in a 'pre.py-doctest' block. - * If the example is in a details table entry, then it will use - * the colors specified by the 'table pre.py-doctest' line. - * - Source code listings are displayed in a 'pre.py-src' block. - * Each line is marked with 'span.py-line' (used to draw a line - * down the left margin, separating the code from the line - * numbers). Line numbers are displayed with 'span.py-lineno'. - * The expand/collapse block toggle button is displayed with - * 'a.py-toggle' (Note: the CSS style for 'a.py-toggle' should not - * modify the font size of the text.) - * - If a source code page is opened with an anchor, then the - * corresponding code block will be highlighted. The code - * block's header is highlighted with 'py-highlight-hdr'; and - * the code block's body is highlighted with 'py-highlight'. - * - The remaining py-* classes are used to perform syntax - * highlighting (py-string for string literals, py-name for names, - * etc.) - */ -pre.py-doctest { padding: .5em; margin: 1em; - background: #e8f0f8; color: #000000; - border: 1px solid #708890; } -table pre.py-doctest { background: #dce4ec; - color: #000000; } -pre.py-src { border: 2px solid #000000; - background: #f0f0f0; color: #000000; } -.py-line { border-left: 2px solid #000000; - margin-left: .2em; padding-left: .4em; } -.py-lineno { font-style: italic; font-size: 90%; - padding-left: .5em; } -a.py-toggle { text-decoration: none; } -div.py-highlight-hdr { border-top: 2px solid #000000; - border-bottom: 2px solid #000000; - background: #d8e8e8; } -div.py-highlight { border-bottom: 2px solid #000000; - background: #d0e0e0; } -.py-prompt { color: #005050; font-weight: bold;} -.py-more { color: #005050; font-weight: bold;} -.py-string { color: #006030; } -.py-comment { color: #003060; } -.py-keyword { color: #600000; } -.py-output { color: #404040; } -.py-name { color: #000050; } -.py-name:link { color: #000050 !important; } -.py-name:visited { color: #000050 !important; } -.py-number { color: #005000; } -.py-defname { color: #000060; font-weight: bold; } -.py-def-name { color: #000060; font-weight: bold; } -.py-base-class { color: #000060; } -.py-param { color: #000060; } -.py-docstring { color: #006030; } -.py-decorator { color: #804020; } -/* Use this if you don't want links to names underlined: */ -/*a.py-name { text-decoration: none; }*/ - -/* Graphs & Diagrams - * - These CSS styles are used for graphs & diagrams generated using - * Graphviz dot. 'img.graph-without-title' is used for bare - * diagrams (to remove the border created by making the image - * clickable). - */ -img.graph-without-title { border: none; } -img.graph-with-title { border: 1px solid #000000; } -span.graph-title { font-weight: bold; } -span.graph-caption { } - -/* General-purpose classes - * - 'p.indent-wrapped-lines' defines a paragraph whose first line - * is not indented, but whose subsequent lines are. - * - The 'nomargin-top' class is used to remove the top margin (e.g. - * from lists). The 'nomargin' class is used to remove both the - * top and bottom margin (but not the left or right margin -- - * for lists, that would cause the bullets to disappear.) 
- */ -p.indent-wrapped-lines { padding: 0 0 0 7em; text-indent: -7em; - margin: 0; } -.nomargin-top { margin-top: 0; } -.nomargin { margin-top: 0; margin-bottom: 0; } - -/* HTML Log */ -div.log-block { padding: 0; margin: .5em 0 .5em 0; - background: #e8f0f8; color: #000000; - border: 1px solid #000000; } -div.log-error { padding: .1em .3em .1em .3em; margin: 4px; - background: #ffb0b0; color: #000000; - border: 1px solid #000000; } -div.log-warning { padding: .1em .3em .1em .3em; margin: 4px; - background: #ffffb0; color: #000000; - border: 1px solid #000000; } -div.log-info { padding: .1em .3em .1em .3em; margin: 4px; - background: #b0ffb0; color: #000000; - border: 1px solid #000000; } -h2.log-hdr { background: #70b0ff; color: #000000; - margin: 0; padding: 0em 0.5em 0em 0.5em; - border-bottom: 1px solid #000000; font-size: 110%; } -p.log { font-weight: bold; margin: .5em 0 .5em 0; } -tr.opt-changed { color: #000000; font-weight: bold; } -tr.opt-default { color: #606060; } -pre.log { margin: 0; padding: 0; padding-left: 1em; } diff --git a/numpy/doc/html/epydoc.js b/numpy/doc/html/epydoc.js deleted file mode 100644 index e787dbcf4..000000000 --- a/numpy/doc/html/epydoc.js +++ /dev/null @@ -1,293 +0,0 @@ -function toggle_private() { - // Search for any private/public links on this page. Store - // their old text in "cmd," so we will know what action to - // take; and change their text to the opposite action. - var cmd = "?"; - var elts = document.getElementsByTagName("a"); - for(var i=0; i...
"; - elt.innerHTML = s; - } -} - -function toggle(id) { - elt = document.getElementById(id+"-toggle"); - if (elt.innerHTML == "-") - collapse(id); - else - expand(id); - return false; -} - -function highlight(id) { - var elt = document.getElementById(id+"-def"); - if (elt) elt.className = "py-highlight-hdr"; - var elt = document.getElementById(id+"-expanded"); - if (elt) elt.className = "py-highlight"; - var elt = document.getElementById(id+"-collapsed"); - if (elt) elt.className = "py-highlight"; -} - -function num_lines(s) { - var n = 1; - var pos = s.indexOf("\n"); - while ( pos > 0) { - n += 1; - pos = s.indexOf("\n", pos+1); - } - return n; -} - -// Collapse all blocks that mave more than `min_lines` lines. -function collapse_all(min_lines) { - var elts = document.getElementsByTagName("div"); - for (var i=0; i 0) - if (elt.id.substring(split, elt.id.length) == "-expanded") - if (num_lines(elt.innerHTML) > min_lines) - collapse(elt.id.substring(0, split)); - } -} - -function expandto(href) { - var start = href.indexOf("#")+1; - if (start != 0 && start != href.length) { - if (href.substring(start, href.length) != "-") { - collapse_all(4); - pos = href.indexOf(".", start); - while (pos != -1) { - var id = href.substring(start, pos); - expand(id); - pos = href.indexOf(".", pos+1); - } - var id = href.substring(start, href.length); - expand(id); - highlight(id); - } - } -} - -function kill_doclink(id) { - var parent = document.getElementById(id); - parent.removeChild(parent.childNodes.item(0)); -} -function auto_kill_doclink(ev) { - if (!ev) var ev = window.event; - if (!this.contains(ev.toElement)) { - var parent = document.getElementById(this.parentID); - parent.removeChild(parent.childNodes.item(0)); - } -} - -function doclink(id, name, targets_id) { - var elt = document.getElementById(id); - - // If we already opened the box, then destroy it. - // (This case should never occur, but leave it in just in case.) - if (elt.childNodes.length > 1) { - elt.removeChild(elt.childNodes.item(0)); - } - else { - // The outer box: relative + inline positioning. - var box1 = document.createElement("div"); - box1.style.position = "relative"; - box1.style.display = "inline"; - box1.style.top = 0; - box1.style.left = 0; - - // A shadow for fun - var shadow = document.createElement("div"); - shadow.style.position = "absolute"; - shadow.style.left = "-1.3em"; - shadow.style.top = "-1.3em"; - shadow.style.background = "#404040"; - - // The inner box: absolute positioning. - var box2 = document.createElement("div"); - box2.style.position = "relative"; - box2.style.border = "1px solid #a0a0a0"; - box2.style.left = "-.2em"; - box2.style.top = "-.2em"; - box2.style.background = "white"; - box2.style.padding = ".3em .4em .3em .4em"; - box2.style.fontStyle = "normal"; - box2.onmouseout=auto_kill_doclink; - box2.parentID = id; - - // Get the targets - var targets_elt = document.getElementById(targets_id); - var targets = targets_elt.getAttribute("targets"); - var links = ""; - target_list = targets.split(","); - for (var i=0; i" + - target[0] + ""; - } - - // Put it all together. - elt.insertBefore(box1, elt.childNodes.item(0)); - //box1.appendChild(box2); - box1.appendChild(shadow); - shadow.appendChild(box2); - box2.innerHTML = - "Which "+name+" do you want to see documentation for?" 
+ - ""; - } - return false; -} - -function get_anchor() { - var href = location.href; - var start = href.indexOf("#")+1; - if ((start != 0) && (start != href.length)) - return href.substring(start, href.length); - } -function redirect_url(dottedName) { - // Scan through each element of the "pages" list, and check - // if "name" matches with any of them. - for (var i=0; i-m" or "-c"; - // extract the portion & compare it to dottedName. - var pagename = pages[i].substring(0, pages[i].length-2); - if (pagename == dottedName.substring(0,pagename.length)) { - - // We've found a page that matches `dottedName`; - // construct its URL, using leftover `dottedName` - // content to form an anchor. - var pagetype = pages[i].charAt(pages[i].length-1); - var url = pagename + ((pagetype=="m")?"-module.html": - "-class.html"); - if (dottedName.length > pagename.length) - url += "#" + dottedName.substring(pagename.length+1, - dottedName.length); - return url; - } - } - } diff --git a/numpy/doc/html/example-module.html b/numpy/doc/html/example-module.html deleted file mode 100644 index f08370632..000000000 --- a/numpy/doc/html/example-module.html +++ /dev/null @@ -1,316 +0,0 @@ - - - - - example - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - Module example - - - - - - -
[hide private]
[frames] | no frames]
-
- -

Module example

source code

-

This is the docstring for the example.py module. Modules names should -have short, all-lowercase names. The module name may have underscores if -this improves readability.

-

Every module should have a docstring at the very top of the file. The -module's docstring may extend over multiple lines. If your docstring does -extend over multiple lines, the closing three quotation marks must be on -a line by itself, preferably preceeded by a blank line.

- - - - - - - - - - - - - - - - -
- - - - - -
Functions[hide private]
-
-   - - - - - - -
foo(var1, - var2, - long_var_name='hi')
- One-line summary or signature.
- source code - -
- -
-   - - - - - - -
newfunc()
- Do nothing.
- source code - -
- -
-   - - - - - - -
otherfunc()
- Do nothing.
- source code - -
- -
- - - - - - -
- - - - - -
Function Details[hide private]
-
- -
- -
- - -
-

foo(var1, - var2, - long_var_name='hi') -

-
source code  -
- -

One-line summary or signature.

-

Several sentences providing an extended description. You can put -text in mono-spaced type like so: var.

-
-

Parameters

-
-
var1 : array_like
-
Array_like means all those objects -- lists, nested lists, etc. -- -that can be converted to an array.
-
var2 : integer
-
Write out the full type
-
long_variable_name : {'hi', 'ho'}, optional
-
Choices in brackets, default first when optional.
-
-
-
-

Returns

-
-
named : type
-
Explanation
-
list
-
Explanation
-
of
-
Explanation
-
outputs
-
even more explaining
-
-
-
-

Other Parameters

-
-
only_seldom_used_keywords : type
-
Explanation
-
common_parametrs_listed_above : type
-
Explanation
-
-
-
-

See Also

-

otherfunc : relationship (optional) -newfunc : relationship (optional)

-
-
-

Notes

-

Notes about the implementation algorithm (if needed).

-

This can have multiple paragraphs as can all sections.

-
-
-

Examples

-

examples in doctest format

-
->>> a=[1,2,3]
->>> [x + 3 for x in a]
-[4, 5, 6]
-
-
-
-
-
- -
- -
- - -
-

newfunc() -

-
source code  -
- -

Do nothing.

-

I never saw a purple cow.

-
-
-
-
- -
- -
- - -
-

otherfunc() -

-
source code  -
- -

Do nothing.

-

I never hope to see one.

-
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - diff --git a/numpy/doc/html/example-pysrc.html b/numpy/doc/html/example-pysrc.html deleted file mode 100644 index f771330e2..000000000 --- a/numpy/doc/html/example-pysrc.html +++ /dev/null @@ -1,204 +0,0 @@ - - - - - example - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - Module example - - - - - - -
[hide private]
[frames] | no frames]
-
-

Source Code for Module example

-
- 1  """This is the docstring for the example.py module.  Modules names should 
- 2  have short, all-lowercase names.  The module name may have underscores if 
- 3  this improves readability. 
- 4   
- 5  Every module should have a docstring at the very top of the file.  The 
- 6  module's docstring may extend over multiple lines.  If your docstring does 
- 7  extend over multiple lines, the closing three quotation marks must be on 
- 8  a line by itself, preferably preceeded by a blank line. 
- 9   
-10  """ 
-11  __docformat__ = "restructuredtext en" 
-12   
-13  import os                      # standard library imports first 
-14   
-15  import numpy as np             # related third party imports next 
-16  import scipy as sp             # imports should be at the top of the module 
-17  import matplotlib as mpl       # imports should usually be on separate lines 
-18   
-
19 -def foo(var1, var2, long_var_name='hi') : -
20 """One-line summary or signature. -21 -22 Several sentences providing an extended description. You can put -23 text in mono-spaced type like so: ``var``. -24 -25 Parameters -26 ---------- -27 var1 : array_like -28 Array_like means all those objects -- lists, nested lists, etc. -- -29 that can be converted to an array. -30 var2 : integer -31 Write out the full type -32 long_variable_name : {'hi', 'ho'}, optional -33 Choices in brackets, default first when optional. -34 -35 Returns -36 ------- -37 named : type -38 Explanation -39 list -40 Explanation -41 of -42 Explanation -43 outputs -44 even more explaining -45 -46 Other Parameters -47 ---------------- -48 only_seldom_used_keywords : type -49 Explanation -50 common_parametrs_listed_above : type -51 Explanation -52 -53 See Also -54 -------- -55 otherfunc : relationship (optional) -56 newfunc : relationship (optional) -57 -58 Notes -59 ----- -60 Notes about the implementation algorithm (if needed). -61 -62 This can have multiple paragraphs as can all sections. -63 -64 Examples -65 -------- -66 examples in doctest format -67 -68 >>> a=[1,2,3] -69 >>> [x + 3 for x in a] -70 [4, 5, 6] -71 -72 """ -73 -74 pass -
75 -76 -
77 -def newfunc() : -
78 """Do nothing. -79 -80 I never saw a purple cow. -81 -82 """ -83 -84 pass -
85 -86 -
87 -def otherfunc() : -
88 """Do nothing. -89 -90 I never hope to see one. -91 -92 """ -93 -94 pass -
95 -
-
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - diff --git a/numpy/doc/html/frames.html b/numpy/doc/html/frames.html deleted file mode 100644 index 6ebc67e75..000000000 --- a/numpy/doc/html/frames.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - API Documentation - - - - - - - - - diff --git a/numpy/doc/html/help.html b/numpy/doc/html/help.html deleted file mode 100644 index b92302aeb..000000000 --- a/numpy/doc/html/help.html +++ /dev/null @@ -1,268 +0,0 @@ - - - - - Help - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  - - - - -
[hide private]
[frames] | no frames]
-
- -

API Documentation

- -

This document contains the API (Application Programming Interface) -documentation for this project. Documentation for the Python -objects defined by the project is divided into separate pages for each -package, module, and class. The API documentation also includes two -pages containing information about the project as a whole: a trees -page, and an index page.

- -

Object Documentation

- -

Each Package Documentation page contains:

-
    -
  • A description of the package.
  • -
  • A list of the modules and sub-packages contained by the - package.
  • -
  • A summary of the classes defined by the package.
  • -
  • A summary of the functions defined by the package.
  • -
  • A summary of the variables defined by the package.
  • -
  • A detailed description of each function defined by the - package.
  • -
  • A detailed description of each variable defined by the - package.
  • -
- -

Each Module Documentation page contains:

-
    -
  • A description of the module.
  • -
  • A summary of the classes defined by the module.
  • -
  • A summary of the functions defined by the module.
  • -
  • A summary of the variables defined by the module.
  • -
  • A detailed description of each function defined by the - module.
  • -
  • A detailed description of each variable defined by the - module.
  • -
- -

Each Class Documentation page contains:

-
    -
  • A class inheritance diagram.
  • -
  • A list of known subclasses.
  • -
  • A description of the class.
  • -
  • A summary of the methods defined by the class.
  • -
  • A summary of the instance variables defined by the class.
  • -
  • A summary of the class (static) variables defined by the - class.
  • -
  • A detailed description of each method defined by the - class.
  • -
  • A detailed description of each instance variable defined by the - class.
  • -
  • A detailed description of each class (static) variable defined - by the class.
  • -
- -

Project Documentation

- -

The Trees page contains the module and class hierarchies:

-
    -
  • The module hierarchy lists every package and module, with - modules grouped into packages. At the top level, and within each - package, modules and sub-packages are listed alphabetically.
  • -
  • The class hierarchy lists every class, grouped by base - class. If a class has more than one base class, then it will be - listed under each base class. At the top level, and under each base - class, classes are listed alphabetically.
  • -
- -

The Index page contains indices of terms and - identifiers:

-
    -
  • The term index lists every term indexed by any object's - documentation. For each term, the index provides links to each - place where the term is indexed.
  • -
  • The identifier index lists the (short) name of every package, - module, class, method, function, variable, and parameter. For each - identifier, the index provides a short description, and a link to - its documentation.
  • -
- -

The Table of Contents

- -

The table of contents occupies the two frames on the left side of -the window. The upper-left frame displays the project -contents, and the lower-left frame displays the module -contents:

- - - - - - - - - -
- Project
Contents
...
- API
Documentation
Frame


-
- Module
Contents
 
...
  -

- -

The project contents frame contains a list of all packages -and modules that are defined by the project. Clicking on an entry -will display its contents in the module contents frame. Clicking on a -special entry, labeled "Everything," will display the contents of -the entire project.

- -

The module contents frame contains a list of every -submodule, class, type, exception, function, and variable defined by a -module or package. Clicking on an entry will display its -documentation in the API documentation frame. Clicking on the name of -the module, at the top of the frame, will display the documentation -for the module itself.

- -

The "frames" and "no frames" buttons below the top -navigation bar can be used to control whether the table of contents is -displayed or not.

- -

The Navigation Bar

- -

A navigation bar is located at the top and bottom of every page. -It indicates what type of page you are currently viewing, and allows -you to go to related pages. The following table describes the labels -on the navigation bar. Note that not some labels (such as -[Parent]) are not displayed on all pages.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
LabelHighlighted when...Links to...
[Parent](never highlighted) the parent of the current package
[Package]viewing a packagethe package containing the current object -
[Module]viewing a modulethe module containing the current object -
[Class]viewing a class the class containing the current object
[Trees]viewing the trees page the trees page
[Index]viewing the index page the index page
[Help]viewing the help page the help page
- -

The "show private" and "hide private" buttons below -the top navigation bar can be used to control whether documentation -for private objects is displayed. Private objects are usually defined -as objects whose (short) names begin with a single underscore, but do -not end with an underscore. For example, "_x", -"__pprint", and "epydoc.epytext._tokenize" -are private objects; but "re.sub", -"__init__", and "type_" are not. However, -if a module defines the "__all__" variable, then its -contents are used to decide which objects are private.

- -

A timestamp below the bottom navigation bar indicates when each -page was last updated.

- - - - - - - - - - - - - - - - - - - - - - - -
- - - - diff --git a/numpy/doc/html/identifier-index.html b/numpy/doc/html/identifier-index.html deleted file mode 100644 index c29b8c5c9..000000000 --- a/numpy/doc/html/identifier-index.html +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Identifier Index - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  - - - - -
[hide private]
[frames] | no frames]
-
- -
-

Identifier Index

-
-[ - A - B - C - D - E - F - G - H - I - J - K - L - M - N - O - P - Q - R - S - T - U - V - W - X - Y - Z - _ -] -
- - - - - - - - - -

E

- - - - - - - - -

F

- - - - - - - - -

N

- - - - - - - - -

O

- - - - - - - - -
-

- - - - - - - - - - - - - - - - - - - - - - -
- - - - diff --git a/numpy/doc/html/index.html b/numpy/doc/html/index.html deleted file mode 100644 index 6ebc67e75..000000000 --- a/numpy/doc/html/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - API Documentation - - - - - - - - - diff --git a/numpy/doc/html/module-tree.html b/numpy/doc/html/module-tree.html deleted file mode 100644 index ad64d11b2..000000000 --- a/numpy/doc/html/module-tree.html +++ /dev/null @@ -1,101 +0,0 @@ - - - - - Module Hierarchy - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  - - - - -
[hide private]
[frames] | no frames]
-
-

Module Hierarchy

-
    -
  • example: This is the docstring for the example.py module.
  • -
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - diff --git a/numpy/doc/html/redirect.html b/numpy/doc/html/redirect.html deleted file mode 100644 index dbd50828c..000000000 --- a/numpy/doc/html/redirect.html +++ /dev/null @@ -1,38 +0,0 @@ -Epydoc Redirect Page - - - - - - - - -

Epydoc Auto-redirect page

- -

When javascript is enabled, this page will redirect URLs of -the form redirect.html#dotted.name to the -documentation for the object with the given fully-qualified -dotted name.

-

 

- - - - - diff --git a/numpy/doc/html/toc-everything.html b/numpy/doc/html/toc-everything.html deleted file mode 100644 index ce94bd1f0..000000000 --- a/numpy/doc/html/toc-everything.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - Everything - - - - - -

Everything

-
-

All Functions

- example.foo
example.newfunc
example.otherfunc

-[hide private] - - - - diff --git a/numpy/doc/html/toc-example-module.html b/numpy/doc/html/toc-example-module.html deleted file mode 100644 index c691dccae..000000000 --- a/numpy/doc/html/toc-example-module.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - example - - - - - -

Module example

-
-

Functions

- foo
newfunc
otherfunc

-[hide private] - - - - diff --git a/numpy/doc/html/toc.html b/numpy/doc/html/toc.html deleted file mode 100644 index 7008e1695..000000000 --- a/numpy/doc/html/toc.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - Table of Contents - - - - - -

Table of Contents

-
- Everything -
-

Modules

- example

- [hide private] - - - - diff --git a/numpy/doc/indexing.py new file mode 100644 index 000000000..365edd67a --- /dev/null +++ b/numpy/doc/indexing.py @@ -0,0 +1,384 @@ +""" +============== +Array indexing +============== +
+Array indexing refers to any use of the square brackets ([]) to index +array values. There are many options to indexing, which give numpy +indexing great power, but with power comes some complexity and the +potential for confusion. This section is just an overview of the +various options and issues related to indexing. Aside from single +element indexing, the details on most of these options are to be +found in related sections. +
+Assignment vs referencing +========================= +
+Most of the following examples show the use of indexing when referencing +data in an array. The examples work just as well when assigning to an +array. See the section at the end for specific examples and explanations +on how assignments work. +
+Single element indexing +======================= +
+Single element indexing for a 1-D array is what one expects. It works +exactly like that for other standard Python sequences. It is 0-based, +and accepts negative indices for indexing from the end of the array. :: +
+ >>> x = np.arange(10) + >>> x[2] + 2 + >>> x[-2] + 8 +
+Unlike lists and tuples, numpy arrays support multidimensional indexing +for multidimensional arrays. That means that it is not necessary to +separate each dimension's index into its own set of square brackets. :: +
+ >>> x.shape = (2,5) # now x is 2-dimensional + >>> x[1,3] + 8 + >>> x[1,-1] + 9 +
+Note that if one indexes a multidimensional array with fewer indices +than dimensions, one gets a subdimensional array. For example: :: +
+ >>> x[0] + array([0, 1, 2, 3, 4]) +
+That is, each index specified selects the array corresponding to the rest +of the dimensions selected. In the above example, choosing 0 means that the +remaining dimension of length 5 is being left unspecified, and that what +is returned is an array of that dimensionality and size. It must be noted +that the returned array is not a copy of the original, but points to the +same values in memory as does the original array (a new view of the same +data in other words, see xxx for details). In this case, +the 1-D array at the first position (0) is returned. So using a single +index on the returned array results in a single element being returned. +That is: :: +
+ >>> x[0][2] + 2 +
+So note that ``x[0,2] == x[0][2]``, though the second case is less efficient: +a new temporary array is created after the first index that is subsequently +indexed by 2. +
+Note to those used to IDL or Fortran memory order as it relates to indexing: +Numpy uses C-order indexing. That means that the last index usually (see +xxx for exceptions) represents the most rapidly changing memory location, +unlike Fortran or IDL, where the first index represents the most rapidly +changing location in memory. This difference represents a great potential +for confusion. +
+Other indexing options +====================== +
+It is possible to slice and stride arrays to extract arrays of the same +number of dimensions, but of different sizes than the original. The slicing +and striding works exactly the same way it does for lists and tuples except +that they can be applied to multiple dimensions as well.
A few +examples illustrate this best: :: +
+ >>> x = np.arange(10) + >>> x[2:5] + array([2, 3, 4]) + >>> x[:-7] + array([0, 1, 2]) + >>> x[1:7:2] + array([1, 3, 5]) + >>> y = np.arange(35).reshape(5,7) + >>> y[1:5:2,::3] + array([[ 7, 10, 13], + [21, 24, 27]]) +
+Note that slices of arrays do not copy the internal array data but +only produce new views of the original data (see xxx for more +explanation of this issue). +
+It is possible to index arrays with other arrays for the purposes of +selecting lists of values out of arrays into new arrays. There are two +different ways of accomplishing this. One uses one or more arrays of +index values (see xxx for details). The other involves giving a boolean +array of the proper shape to indicate the values to be selected. +Index arrays are a very powerful tool that allows one to avoid looping +over individual elements in arrays and thus greatly improve performance +(see xxx for examples). +
+It is possible to use special features to effectively increase the +number of dimensions in an array through indexing so the resulting +array acquires the shape needed for use in an expression or with a +specific function. See xxx. +
+Index arrays +============ +
+Numpy arrays may be indexed with other arrays (or any other sequence-like +object that can be converted to an array, such as lists, with the exception +of tuples; see the end of this document for why this is). The use of index +arrays ranges from simple, straightforward cases to complex, hard-to-understand +cases. For all cases of index arrays, what is returned is a copy of the +original data, not a view as one gets for slices. +
+Index arrays must be of integer type. Each value in the array indicates which +value in the array to use in place of the index. To illustrate: :: +
+ >>> x = np.arange(10,1,-1) + >>> x + array([10, 9, 8, 7, 6, 5, 4, 3, 2]) + >>> x[np.array([3, 3, 1, 8])] + array([7, 7, 9, 2]) +
+
+The index array consisting of the values 3, 3, 1 and 8 correspondingly creates +an array of length 4 (same as the index array) where each index is replaced by +the value the index array has in the array being indexed. +
+Negative values are permitted and work as they do with single indices or slices: :: +
+ >>> x[np.array([3,3,-3,8])] + array([7, 7, 4, 2]) +
+It is an error to have index values out of bounds: :: +
+ >>> x[np.array([3, 3, 20, 8])] + IndexError: index 20 out of bounds 0<=index<9 +
+Generally speaking, what is returned when index arrays are used is an array with +the same shape as the index array, but with the type and values of the array being +indexed. As an example, we can use a multidimensional index array instead: :: +
+ >>> x[np.array([[1,1],[2,3]])] + array([[9, 9], + [8, 7]]) +
+Indexing Multi-dimensional arrays +================================= +
+Things become more complex when multidimensional arrays are indexed, particularly +with multidimensional index arrays. These tend to be more unusual uses, but they +are permitted, and they are useful for some problems. We'll start with the +simplest multidimensional case (using the array y from the previous examples): :: +
+ >>> y[np.array([0,2,4]), np.array([0,1,2])] + array([ 0, 15, 30]) +
+In this case, if the index arrays have a matching shape, and there is an index +array for each dimension of the array being indexed, the resultant array has the +same shape as the index arrays, and the values correspond to the index set for each +position in the index arrays.
In this example, the first index value is 0 for both +index arrays, and thus the first value of the resultant array is y[0,0]. The next +value is y[2,1], and the last is y[4,2]. +
+If the index arrays do not have the same shape, there is an attempt to broadcast +them to the same shape. Broadcasting won't be discussed here but is discussed in +detail in xxx. If they cannot be broadcast to the same shape, an exception is +raised: :: +
+ >>> y[np.array([0,2,4]), np.array([0,1])] + ValueError: shape mismatch: objects cannot be broadcast to a single shape +
+The broadcasting mechanism permits index arrays to be combined with scalars for +other indices. The effect is that the scalar value is used for all the corresponding +values of the index arrays: :: +
+ >>> y[np.array([0,2,4]), 1] + array([ 1, 15, 29]) +
+Jumping to the next level of complexity, it is possible to only partially index an array +with index arrays. It takes a bit of thought to understand what happens in such cases. +For example, if we just use one index array with y: :: +
+ >>> y[np.array([0,2,4])] + array([[ 0, 1, 2, 3, 4, 5, 6], + [14, 15, 16, 17, 18, 19, 20], + [28, 29, 30, 31, 32, 33, 34]]) +
+What results is the construction of a new array where each value of the index array +selects one row from the array being indexed and the resultant array has the resulting +shape (number of index elements, size of row). +
+An example of where this may be useful is for a color lookup table where we want to map +the values of an image into RGB triples for display. The lookup table could have a shape +(nlookup, 3). Indexing such an array with an image with shape (ny, nx) with dtype=np.uint8 +(or any integer type so long as values are within the bounds of the lookup table) will +result in an array of shape (ny, nx, 3) where a triple of RGB values is associated with +each pixel location. +
+In general, the shape of the resultant array will be the concatenation of the shape of +the index array (or the shape that all the index arrays were broadcast to) with the +shape of any unused dimensions (those not indexed) in the array being indexed. +
+Boolean or "mask" index arrays +============================== +
+Boolean arrays used as indices are treated in a different manner entirely than index +arrays. Boolean arrays must be of the same shape as the array being indexed, or +broadcastable to the same shape. In the most straightforward case, the boolean array +has the same shape: :: +
+ >>> b = y>20 + >>> y[b] + array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) +
+The result is a 1-D array containing all the elements in the indexed array corresponding +to all the true elements in the boolean array. As with index arrays, what is returned +is a copy of the data, not a view as one gets with slices. +
+With broadcasting, multidimensional arrays may be the result. For example: :: +
+ >>> b[:,5] # use a 1-D boolean that broadcasts with y + array([False, False, False, True, True], dtype=bool) + >>> y[b[:,5]] + array([[21, 22, 23, 24, 25, 26, 27], + [28, 29, 30, 31, 32, 33, 34]]) +
+Here the 4th and 5th rows are selected from the indexed array and combined to make a +2-D array. +
+Combining index arrays with slices +================================== +
+Index arrays may be combined with slices. For example: :: +
+ >>> y[np.array([0,2,4]),1:3] + array([[ 1, 2], + [15, 16], + [29, 30]]) +
+In effect, the slice is converted to an index array np.array([[1,2]]) (shape (1,2)) that is +broadcast with the index array to produce a resultant array of shape (3,2).
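To make the conversion concrete, here is a small editorial sketch (reusing the array ``y`` from the examples above; the names ``rows`` and ``cols`` are illustrative and not part of the original text) that spells out the equivalent explicit index-array form: ::

    >>> rows = np.array([0, 2, 4])
    >>> cols = np.array([1, 2])           # the slice 1:3 written as an index array
    >>> y[rows[:, np.newaxis], cols]      # (3,1) broadcast against (2,) gives shape (3,2)
    array([[ 1,  2],
           [15, 16],
           [29, 30]])

As with any use of index arrays, the result is a copy of the selected values, not a view into ``y``.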
+ +Likewise, slicing can be combined with broadcasted boolean indices: :: +
+ >>> y[b[:,5],1:3] + array([[22, 23], + [29, 30]]) +
+Structural indexing tools +========================= +
+To facilitate easy matching of array shapes with expressions and in +assignments, the np.newaxis object can be used within array indices +to add new dimensions with a size of 1. For example: :: +
+ >>> y.shape + (5, 7) + >>> y[:,np.newaxis,:].shape + (5, 1, 7) +
+Note that there are no new elements in the array, just that the +dimensionality is increased. This can be handy to combine two +arrays in a way that otherwise would require explicit reshaping +operations. For example: :: +
+ >>> x = np.arange(5) + >>> x[:,np.newaxis] + x[np.newaxis,:] + array([[0, 1, 2, 3, 4], + [1, 2, 3, 4, 5], + [2, 3, 4, 5, 6], + [3, 4, 5, 6, 7], + [4, 5, 6, 7, 8]]) +
+The ellipsis syntax may be used to indicate selecting in full any +remaining unspecified dimensions. For example: :: +
+ >>> z = np.arange(81).reshape(3,3,3,3) + >>> z[1,...,2] + array([[29, 32, 35], + [38, 41, 44], + [47, 50, 53]]) +
+This is equivalent to: :: +
+ >>> z[1,:,:,2] +
+Assigning values to indexed arrays +================================== +
+As mentioned, one can select a subset of an array to assign to using +a single index, slices, and index and mask arrays. The value being +assigned to the indexed array must be shape consistent (the same shape +or broadcastable to the shape the index produces). For example, it is +permitted to assign a constant to a slice: :: +
+ >>> x[2:7] = 1 +
+or an array of the right size: :: +
+ >>> x[2:7] = np.arange(5) +
+Note that assignments may result in changes if assigning +higher types to lower types (like floats to ints) or even +exceptions (assigning complex to floats or ints): :: +
+ >>> x[1] = 1.2 + >>> x[1] + 1 + >>> x[1] = 1.2j + TypeError: can't convert complex to long; use long(abs(z)) +
+
+Unlike some of the references (such as array and mask indices), +assignments are always made to the original data in the array +(indeed, nothing else would make sense!). Note though, that some +actions may not work as one may naively expect. This particular +example is often surprising to people: :: +
+ >>> x[np.array([1, 1, 3, 1])] += 1 +
+People might expect that the 1st location would be incremented by 3. +In fact, it will only be incremented by 1. The reason is that +a new array is extracted from the original (as a temporary) containing +the values at 1, 1, 3, 1, then the value 1 is added to the temporary, +and then the temporary is assigned back to the original array. Thus +the value of the array at x[1]+1 is assigned to x[1] three times, +rather than being incremented 3 times. +
+Dealing with variable numbers of indices within programs +======================================================== +
+The index syntax is very powerful but limiting when dealing with +a variable number of indices. For example, if you want to write +a function that can handle arguments with various numbers of +dimensions without having to write special case code for each +number of possible dimensions, how can that be done? If one +supplies to the index a tuple, the tuple will be interpreted +as a list of indices. For example (using the previous definition +for the array z): :: +
+ >>> indices = (1,1,1,1) + >>> z[indices] + 40 +
+So one can use code to construct tuples of any number of indices +and then use these within an index. +
+Slices can be specified within programs by using the slice() function +in Python.
For example: :: +
+ >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2] + >>> z[indices] + array([39, 40]) +
+Likewise, ellipsis can be specified by code by using the Ellipsis object: :: +
+ >>> indices = (1, Ellipsis, 1) # same as [1,...,1] + >>> z[indices] + array([[28, 31, 34], + [37, 40, 43], + [46, 49, 52]]) +
+For this reason it is possible to use the output from the np.where() +function directly as an index since it always returns a tuple of index arrays. +
+Because of the special treatment of tuples, they are not automatically converted +to an array as a list would be. As an example: :: +
+ >>> z[[1,1,1,1]] + ... # produces a large array + >>> z[(1,1,1,1)] + 40 # returns a single value +
+""" diff --git a/numpy/doc/internals.py new file mode 100644 index 000000000..a74429368 --- /dev/null +++ b/numpy/doc/internals.py @@ -0,0 +1,162 @@ +""" +=============== +Array Internals +=============== +
+Internal organization of numpy arrays +===================================== +
+Understanding a bit about how numpy arrays are handled under the covers helps in understanding numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to Numpy". +
+Numpy arrays consist of two major components: the raw array data (from now on, +referred to as the data buffer), and the information about the raw array data. +The data buffer is typically what people think of as arrays in C or Fortran, +a contiguous (and fixed) block of memory containing fixed-sized data items. +Numpy also contains a significant set of data that describes how to interpret +the data in the data buffer. This extra information contains (among other things): +
+ 1) The basic data element's size in bytes + 2) The start of the data within the data buffer (an offset relative to the + beginning of the data buffer). + 3) The number of dimensions and the size of each dimension + 4) The separation between elements for each dimension (the 'stride'). This + does not have to be a multiple of the element size + 5) The byte order of the data (which may not be the native byte order) + 6) Whether the buffer is read-only + 7) Information (via the dtype object) about the interpretation of the basic + data element. The basic data element may be as simple as an int or a float, + or it may be a compound object (e.g., struct-like), a fixed character field, + or Python object pointers. + 8) Whether the array is to be interpreted as C-order or Fortran-order. +
+This arrangement allows for very flexible use of arrays. One thing that it allows +is simple changes of the metadata to change the interpretation of the array buffer. +Changing the byteorder of the array is a simple change involving no rearrangement +of the data. The shape of the array can be changed very easily without changing +anything in the data buffer or any data copying at all. +
+Among other things, this makes it possible to create a new array metadata +object that uses the same data buffer +to create a new view of that data buffer that has a different interpretation +of the buffer (e.g., different shape, offset, byte order, strides, etc.) but +shares the same data bytes. Many operations in numpy do just this, such as +slicing. Other operations, such as transpose, don't move data elements +around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the data buffer doesn't move.
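As a rough illustration of this view behaviour, here is a minimal editorial sketch (the array ``a`` and the value 99 are made up for illustration and are not part of the original text): ::

    >>> a = np.arange(6).reshape(2, 3)
    >>> b = a.T                        # transpose: new metadata, same data buffer
    >>> b.strides == a.strides[::-1]   # only the stride information is swapped
    True
    >>> b[0, 1] = 99                   # writing through the view...
    >>> a[1, 0]                        # ...is visible in the original array
    99

Here ``b`` is a separate ndarray object, yet both names refer to the same underlying data buffer.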
+ +Typically these new versions of the array metadata (but with the same data buffer) are +new 'views' into the data buffer. There is a different ndarray object, but it +uses the same data buffer. This is why it is necessary to force copies through +use of the .copy() method if one really wants to make a new and independent +copy of the data buffer. +
+New views into arrays mean that the object reference counts for the data buffer +increase. Simply doing away with the original array object will not remove the +data buffer if other views of it still exist. +
+Multidimensional Array Indexing Order Issues +============================================ +
+What is the right way to index +multi-dimensional arrays? Before you jump to conclusions about the one true +way to index multi-dimensional arrays, it pays to understand why this is +a confusing issue. This section will try to explain in detail how numpy +indexing works and why we adopt the convention we do for images, and when it +may be appropriate to adopt other conventions. +
+The first thing to understand is +that there are two conflicting conventions for indexing 2-dimensional arrays. +Matrix notation uses the first index to indicate which row is being selected and +the second index to indicate which column is selected. This is opposite to the +geometrically oriented convention for images, where people generally think the +first index represents x position (i.e., column) and the second represents y +position (i.e., row). This alone is the source of much confusion; +matrix-oriented users and image-oriented users expect two different things with +regard to indexing. +
+The second issue to understand is how indices correspond +to the order the array is stored in memory. In Fortran, the first index is the +most rapidly varying index when moving through the elements of a two +dimensional array as it is stored in memory. If you adopt the matrix +convention for indexing, then this means the matrix is stored one column at a +time (since the first index moves to the next row as it changes). Thus Fortran +is considered a column-major language. C has just the opposite convention. In +C, the last index changes most rapidly as one moves through the array as +stored in memory. Thus C is a row-major language. The matrix is stored by +rows. Note that in both cases this presumes that the matrix convention for +indexing is being used, i.e., for both Fortran and C, the first index is the +row. Note that this convention implies that the indexing convention is invariant +and that the data order changes to keep that so. +
+But that's not the only way +to look at it. Suppose one has large two-dimensional arrays (images or +matrices) stored in data files. Suppose the data are stored by rows rather than +by columns. If we are to preserve our index convention (whether matrix or +image), that means that, depending on the language we use, we may be forced to +reorder the data if it is read into memory to preserve our indexing +convention. For example, if we read row-ordered data into memory without +reordering, it will match the matrix indexing convention for C, but not for +Fortran. Conversely, it will match the image indexing convention for Fortran, +but not for C. For C, if one is using data stored in row order, and one wants +to preserve the image index convention, the data must be reordered when +reading into memory. +
+In the end, which you do for Fortran or C depends on +which is more important: not reordering data or preserving the indexing +convention.
For large images, reordering data is potentially expensive, and +often the indexing convention is inverted to avoid that. +
+The situation with +numpy makes this issue yet more complicated. The internal machinery of numpy +arrays is flexible enough to accept any ordering of indices. One can simply +reorder indices by manipulating the internal stride information for arrays +without reordering the data at all. Numpy will know how to map the new index +order to the data without moving the data. +
+So if this is true, why not choose +the index order that matches what you most expect? In particular, why not define +row-ordered images to use the image convention? (This is sometimes referred +to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' +order options for array ordering in numpy.) The drawback of doing this is +potential performance penalties. It's common to access the data sequentially, +either implicitly in array operations or explicitly by looping over rows of an +image. When that is done, the data will be accessed in non-optimal order. +As the first index is incremented, what is actually happening is that elements +spaced far apart in memory are being sequentially accessed, with usually poor +memory access speeds. For example, consider a two-dimensional image 'im' defined so +that im[0, 10] represents the value at x=0, y=10. To be consistent with usual +Python behavior, im[0] would then represent a column at x=0. Yet that data +would be spread over the whole array since the data are stored in row order. +Despite the flexibility of numpy's indexing, it can't really paper over the fact that +basic operations are rendered inefficient because of data order or that getting +contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs +im[0]); thus one can't use an idiom such as 'for row in im'; 'for col in im' does +work, but doesn't yield contiguous column data. +
+As it turns out, numpy is +smart enough when dealing with ufuncs to determine which index is the most +rapidly varying one in memory and uses that for the innermost loop. Thus for +ufuncs there is no large intrinsic advantage to either approach in most cases. +On the other hand, use of .flat with a FORTRAN-ordered array will lead to +non-optimal memory access as adjacent elements in the flattened array (iterator, +actually) are not contiguous in memory. +
+Indeed, the fact is that Python +indexing on lists and other sequences naturally leads to an outside-to-inside +ordering (the first index gets the largest grouping, the next the next largest, +and the last gets the smallest element). Since image data are normally stored +by rows, this corresponds to position within rows being the last item indexed. +
+If you do want to use Fortran ordering, realize that +there are two approaches to consider: 1) accept that the first index is just not +the most rapidly changing in memory and have all your I/O routines reorder +your data when going from memory to disk or vice versa, or 2) use numpy's +mechanism for mapping the first index to the most rapidly varying data. We +recommend the former if possible. The disadvantage of the latter is that many +of numpy's functions will yield arrays without Fortran ordering unless you are +careful to use the 'order' keyword. Doing this would be highly inconvenient. +
+Otherwise we recommend simply learning to reverse the usual order of indices +when accessing elements of an array.
Granted, it goes against the grain, but +it is more in line with Python semantics and the natural order of the data. + +""" diff --git a/numpy/doc/io.py b/numpy/doc/io.py new file mode 100644 index 000000000..3cde40bd0 --- /dev/null +++ b/numpy/doc/io.py @@ -0,0 +1,9 @@ +""" + +========= +Array I/O +========= + +Placeholder for array I/O documentation. + +""" diff --git a/numpy/doc/jargon.py b/numpy/doc/jargon.py new file mode 100644 index 000000000..e13ff5686 --- /dev/null +++ b/numpy/doc/jargon.py @@ -0,0 +1,9 @@ +""" + +====== +Jargon +====== + +Placeholder for computer science, engineering and other jargon. + +""" diff --git a/numpy/doc/methods_vs_functions.py b/numpy/doc/methods_vs_functions.py new file mode 100644 index 000000000..22eadccf7 --- /dev/null +++ b/numpy/doc/methods_vs_functions.py @@ -0,0 +1,9 @@ +""" + +===================== +Methods vs. Functions +===================== + +Placeholder for Methods vs. Functions documentation. + +""" diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py new file mode 100644 index 000000000..e978100bf --- /dev/null +++ b/numpy/doc/misc.py @@ -0,0 +1,9 @@ +""" + +============= +Miscellaneous +============= + +Placeholder for other tips. + +""" diff --git a/numpy/doc/newdtype_example/example.py b/numpy/doc/newdtype_example/example.py deleted file mode 100644 index 7ee64ca00..000000000 --- a/numpy/doc/newdtype_example/example.py +++ /dev/null @@ -1,16 +0,0 @@ -import floatint.floatint as ff -import numpy as np - -# Setting using array is hard because -# The parser doesn't stop at tuples always -# So, the setitem code will be called with scalars on the -# wrong shaped array. -# But we can get a view as an ndarray of the given type: -g = np.array([1,2,3,4,5,6,7,8]).view(ff.floatint_type) - -# Now, the elements will be the scalar type associated -# with the ndarray. -print g[0] -print type(g[1]) - -# Now, you need to register ufuncs and more arrfuncs to do useful things... 
diff --git a/numpy/doc/newdtype_example/floatint.c b/numpy/doc/newdtype_example/floatint.c deleted file mode 100644 index cf698a7f9..000000000 --- a/numpy/doc/newdtype_example/floatint.c +++ /dev/null @@ -1,153 +0,0 @@ - -#include "Python.h" -#include "structmember.h" /* for offsetof macro if needed */ -#include "numpy/arrayobject.h" - - -/* Use a Python float as the cannonical type being added -*/ - -typedef struct _floatint { - PyObject_HEAD - npy_int32 first; - npy_int32 last; -} PyFloatIntObject; - -static PyTypeObject PyFloatInt_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "floatint.floatint", /*tp_name*/ - sizeof(PyFloatIntObject), /*tp_basicsize*/ -}; - -static PyArray_ArrFuncs _PyFloatInt_Funcs; - -#define _ALIGN(type) offsetof(struct {char c; type v;},v) - -/* The scalar-type */ - -static PyArray_Descr _PyFloatInt_Dtype = { - PyObject_HEAD_INIT(NULL) - &PyFloatInt_Type, - 'f', - '0', - '=', - 0, - 0, - sizeof(double), - _ALIGN(double), - NULL, - NULL, - NULL, - &_PyFloatInt_Funcs -}; - -static void -twoint_copyswap(void *dst, void *src, int swap, void *arr) -{ - if (src != NULL) - memcpy(dst, src, sizeof(double)); - - if (swap) { - register char *a, *b, c; - a = (char *)dst; - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; - } -} - -static PyObject * -twoint_getitem(char *ip, PyArrayObject *ap) { - npy_int32 a[2]; - - if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) { - a[0] = *((npy_int32 *)ip); - a[1] = *((npy_int32 *)ip + 1); - } - else { - ap->descr->f->copyswap(a, ip, !PyArray_ISNOTSWAPPED(ap), - ap); - } - return Py_BuildValue("(ii)", a[0], a[1]); -} - -static int -twoint_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - npy_int32 a[2]; - - if (!PyTuple_Check(op)) { - PyErr_SetString(PyExc_TypeError, "must be a tuple"); - return -1; - } - if (!PyArg_ParseTuple(op, "ii", a, a+1)) return -1; - - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - memcpy(ov, a, sizeof(double)); - } - else { - ap->descr->f->copyswap(ov, a, !PyArray_ISNOTSWAPPED(ap), - ap); - } - return 0; -} - -static PyArray_Descr * _register_dtype(void) -{ - int userval; - PyArray_InitArrFuncs(&_PyFloatInt_Funcs); - /* Add copyswap, - nonzero, getitem, setitem*/ - _PyFloatInt_Funcs.copyswap = twoint_copyswap; - _PyFloatInt_Funcs.getitem = (PyArray_GetItemFunc *)twoint_getitem; - _PyFloatInt_Funcs.setitem = (PyArray_SetItemFunc *)twoint_setitem; - _PyFloatInt_Dtype.ob_type = &PyArrayDescr_Type; - - userval = PyArray_RegisterDataType(&_PyFloatInt_Dtype); - return PyArray_DescrFromType(userval); -} - - -/* Initialization function for the module (*must* be called init) */ - -PyMODINIT_FUNC initfloatint(void) { - PyObject *m, *d; - PyArray_Descr *dtype; - - /* Create the module and add the functions */ - m = Py_InitModule("floatint", NULL); - - /* Import the array objects */ - import_array(); - - - /* Initialize the new float type */ - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - - if (PyType_Ready(&PyFloat_Type) < 0) return; - PyFloatInt_Type.tp_base = &PyFloat_Type; - /* This is only needed because we are sub-typing the - Float type and must pre-set some function pointers - to get PyType_Ready to fill in the rest. 
- */ - PyFloatInt_Type.tp_alloc = PyType_GenericAlloc; - PyFloatInt_Type.tp_new = PyFloat_Type.tp_new; - PyFloatInt_Type.tp_dealloc = PyFloat_Type.tp_dealloc; - PyFloatInt_Type.tp_free = PyObject_Del; - if (PyType_Ready(&PyFloatInt_Type) < 0) return; - /* End specific code */ - - - dtype = _register_dtype(); - Py_XINCREF(dtype); - if (dtype != NULL) { - PyDict_SetItemString(d, "floatint_type", (PyObject *)dtype); - } - Py_INCREF(&PyFloatInt_Type); - PyDict_SetItemString(d, "floatint", (PyObject *)&PyFloatInt_Type); - return; -} diff --git a/numpy/doc/newdtype_example/floatint/__init__.py b/numpy/doc/newdtype_example/floatint/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/numpy/doc/newdtype_example/setup.py b/numpy/doc/newdtype_example/setup.py deleted file mode 100644 index 3b9d75578..000000000 --- a/numpy/doc/newdtype_example/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - -from numpy.distutils.core import setup - -def configuration(parent_package = '', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('floatint',parent_package,top_path) - - config.add_extension('floatint', - sources = ['floatint.c']); - return config - -setup(configuration=configuration) diff --git a/numpy/doc/npy-format.txt b/numpy/doc/npy-format.txt deleted file mode 100644 index 836468096..000000000 --- a/numpy/doc/npy-format.txt +++ /dev/null @@ -1,294 +0,0 @@ -Title: A Simple File Format for NumPy Arrays -Discussions-To: numpy-discussion@mail.scipy.org -Version: $Revision$ -Last-Modified: $Date$ -Author: Robert Kern -Status: Draft -Type: Standards Track -Content-Type: text/plain -Created: 20-Dec-2007 - - -Abstract - - We propose a standard binary file format (NPY) for persisting - a single arbitrary NumPy array on disk. The format stores all of - the shape and dtype information necessary to reconstruct the array - correctly even on another machine with a different architecture. - The format is designed to be as simple as possible while achieving - its limited goals. The implementation is intended to be pure - Python and distributed as part of the main numpy package. - - -Rationale - - A lightweight, omnipresent system for saving NumPy arrays to disk - is a frequent need. Python in general has pickle [1] for saving - most Python objects to disk. This often works well enough with - NumPy arrays for many purposes, but it has a few drawbacks: - - - Dumping or loading a pickle file require the duplication of the - data in memory. For large arrays, this can be a showstopper. - - - The array data is not directly accessible through - memory-mapping. Now that numpy has that capability, it has - proved very useful for loading large amounts of data (or more to - the point: avoiding loading large amounts of data when you only - need a small part). - - Both of these problems can be addressed by dumping the raw bytes - to disk using ndarray.tofile() and numpy.fromfile(). However, - these have their own problems: - - - The data which is written has no information about the shape or - dtype of the array. - - - It is incapable of handling object arrays. - - The NPY file format is an evolutionary advance over these two - approaches. Its design is mostly limited to solving the problems - with pickles and tofile()/fromfile(). It does not intend to solve - more complicated problems for which more complicated formats like - HDF5 [2] are a better solution. - - -Use Cases - - - Neville Newbie has just started to pick up Python and NumPy. 
He - has not installed many packages, yet, nor learned the standard - library, but he has been playing with NumPy at the interactive - prompt to do small tasks. He gets a result that he wants to - save. - - - Annie Analyst has been using large nested record arrays to - represent her statistical data. She wants to convince her - R-using colleague, David Doubter, that Python and NumPy are - awesome by sending him her analysis code and data. She needs - the data to load at interactive speeds. Since David does not - use Python usually, needing to install large packages would turn - him off. - - - Simon Seismologist is developing new seismic processing tools. - One of his algorithms requires large amounts of intermediate - data to be written to disk. The data does not really fit into - the industry-standard SEG-Y schema, but he already has a nice - record-array dtype for using it internally. - - - Polly Parallel wants to split up a computation on her multicore - machine as simply as possible. Parts of the computation can be - split up among different processes without any communication - between processes; they just need to fill in the appropriate - portion of a large array with their results. Having several - child processes memory-mapping a common array is a good way to - achieve this. - - -Requirements - - The format MUST be able to: - - - Represent all NumPy arrays including nested record - arrays and object arrays. - - - Represent the data in its native binary form. - - - Be contained in a single file. - - - Support Fortran-contiguous arrays directly. - - - Store all of the necessary information to reconstruct the array - including shape and dtype on a machine of a different - architecture. Both little-endian and big-endian arrays must be - supported and a file with little-endian numbers will yield - a little-endian array on any machine reading the file. The - types must be described in terms of their actual sizes. For - example, if a machine with a 64-bit C "long int" writes out an - array with "long ints", a reading machine with 32-bit C "long - ints" will yield an array with 64-bit integers. - - - Be reverse engineered. Datasets often live longer than the - programs that created them. A competent developer should be - able create a solution in his preferred programming language to - read most NPY files that he has been given without much - documentation. - - - Allow memory-mapping of the data. - - - Be read from a filelike stream object instead of an actual file. - This allows the implementation to be tested easily and makes the - system more flexible. NPY files can be stored in ZIP files and - easily read from a ZipFile object. - - - Store object arrays. Since general Python objects are - complicated and can only be reliably serialized by pickle (if at - all), many of the other requirements are waived for files - containing object arrays. Files with object arrays do not have - to be mmapable since that would be technically impossible. We - cannot expect the pickle format to be reverse engineered without - knowledge of pickle. However, one should at least be able to - read and write object arrays with the same generic interface as - other arrays. - - - Be read and written using APIs provided in the numpy package - itself without any other libraries. The implementation inside - numpy may be in C if necessary. - - The format explicitly *does not* need to: - - - Support multiple arrays in a file. 
Since we require filelike - objects to be supported, one could use the API to build an ad - hoc format that supported multiple arrays. However, solving the - general problem and use cases is beyond the scope of the format - and the API for numpy. - - - Fully handle arbitrary subclasses of numpy.ndarray. Subclasses - will be accepted for writing, but only the array data will be - written out. A regular numpy.ndarray object will be created - upon reading the file. The API can be used to build a format - for a particular subclass, but that is out of scope for the - general NPY format. - - -Format Specification: Version 1.0 - - The first 6 bytes are a magic string: exactly "\x93NUMPY". - - The next 1 byte is an unsigned byte: the major version number of - the file format, e.g. \x01. - - The next 1 byte is an unsigned byte: the minor version number of - the file format, e.g. \x00. Note: the version of the file format - is not tied to the version of the numpy package. - - The next 2 bytes form a little-endian unsigned short int: the - length of the header data HEADER_LEN. - - The next HEADER_LEN bytes form the header data describing the - array's format. It is an ASCII string which contains a Python - literal expression of a dictionary. It is terminated by a newline - ('\n') and padded with spaces ('\x20') to make the total length of - the magic string + 4 + HEADER_LEN be evenly divisible by 16 for - alignment purposes. - - The dictionary contains three keys: - - "descr" : dtype.descr - An object that can be passed as an argument to the - numpy.dtype() constructor to create the array's dtype. - - "fortran_order" : bool - Whether the array data is Fortran-contiguous or not. - Since Fortran-contiguous arrays are a common form of - non-C-contiguity, we allow them to be written directly to - disk for efficiency. - - "shape" : tuple of int - The shape of the array. - - For repeatability and readability, this dictionary is formatted - using pprint.pformat() so the keys are in alphabetic order. - - Following the header comes the array data. If the dtype contains - Python objects (i.e. dtype.hasobject is True), then the data is - a Python pickle of the array. Otherwise the data is the - contiguous (either C- or Fortran-, depending on fortran_order) - bytes of the array. Consumers can figure out the number of bytes - by multiplying the number of elements given by the shape (noting - that shape=() means there is 1 element) by dtype.itemsize. - - -Conventions - - We recommend using the ".npy" extension for files following this - format. This is by no means a requirement; applications may wish - to use this file format but use an extension specific to the - application. In the absence of an obvious alternative, however, - we suggest using ".npy". - - For a simple way to combine multiple arrays into a single file, - one can use ZipFile to contain multiple ".npy" files. We - recommend using the file extension ".npz" for these archives. - - -Alternatives - - The author believes that this system (or one along these lines) is - about the simplest system that satisfies all of the requirements. - However, one must always be wary of introducing a new binary - format to the world. - - HDF5 [2] is a very flexible format that should be able to - represent all of NumPy's arrays in some fashion. It is probably - the only widely-used format that can faithfully represent all of - NumPy's array features. It has seen substantial adoption by the - scientific community in general and the NumPy community in - particular. 
It is an excellent solution for a wide variety of - array storage problems with or without NumPy. - - HDF5 is a complicated format that more or less implements - a hierarchical filesystem-in-a-file. This fact makes satisfying - some of the Requirements difficult. To the author's knowledge, as - of this writing, there is no application or library that reads or - writes even a subset of HDF5 files that does not use the canonical - libhdf5 implementation. This implementation is a large library - that is not always easy to build. It would be infeasible to - include it in numpy. - - It might be feasible to target an extremely limited subset of - HDF5. Namely, there would be only one object in it: the array. - Using contiguous storage for the data, one should be able to - implement just enough of the format to provide the same metadata - that the proposed format does. One could still meet all of the - technical requirements like mmapability. - - We would accrue a substantial benefit by being able to generate - files that could be read by other HDF5 software. Furthermore, by - providing the first non-libhdf5 implementation of HDF5, we would - be able to encourage more adoption of simple HDF5 in applications - where it was previously infeasible because of the size of the - library. The basic work may encourage similar dead-simple - implementations in other languages and further expand the - community. - - The remaining concern is about reverse engineerability of the - format. Even the simple subset of HDF5 would be very difficult to - reverse engineer given just a file by itself. However, given the - prominence of HDF5, this might not be a substantial concern. - - In conclusion, we are going forward with the design laid out in - this document. If someone writes code to handle the simple subset - of HDF5 that would be useful to us, we may consider a revision of - the file format. - - -Implementation - - The current implementation is in the trunk of the numpy SVN - repository and will be part of the 1.0.5 release. - - http://svn.scipy.org/svn/numpy/trunk - - Specifically, the file format.py in this directory implements the - format as described here. - - -References - - [1] http://docs.python.org/lib/module-pickle.html - - [2] http://hdf.ncsa.uiuc.edu/products/hdf5/index.html - - -Copyright - - This document has been placed in the public domain. - - - -Local Variables: -mode: indented-text -indent-tabs-mode: nil -sentence-end-double-space: t -fill-column: 70 -coding: utf-8 -End: diff --git a/numpy/doc/pep_buffer.txt b/numpy/doc/pep_buffer.txt deleted file mode 100644 index a154d2792..000000000 --- a/numpy/doc/pep_buffer.txt +++ /dev/null @@ -1,869 +0,0 @@ -:PEP: 3118 -:Title: Revising the buffer protocol -:Version: $Revision$ -:Last-Modified: $Date$ -:Authors: Travis Oliphant , Carl Banks -:Status: Draft -:Type: Standards Track -:Content-Type: text/x-rst -:Created: 28-Aug-2006 -:Python-Version: 3000 - -Abstract -======== - -This PEP proposes re-designing the buffer interface (PyBufferProcs -function pointers) to improve the way Python allows memory sharing -in Python 3.0 - -In particular, it is proposed that the character buffer portion -of the API be elminated and the multiple-segment portion be -re-designed in conjunction with allowing for strided memory -to be shared. In addition, the new buffer interface will -allow the sharing of any multi-dimensional nature of the -memory and what data-format the memory contains. 
- -This interface will allow any extension module to either -create objects that share memory or create algorithms that -use and manipulate raw memory from arbitrary objects that -export the interface. - - -Rationale -========= - -The Python 2.X buffer protocol allows different Python types to -exchange a pointer to a sequence of internal buffers. This -functionality is *extremely* useful for sharing large segments of -memory between different high-level objects, but it is too limited and -has issues: - -1. There is the little used "sequence-of-segments" option - (bf_getsegcount) that is not well motivated. - -2. There is the apparently redundant character-buffer option - (bf_getcharbuffer) - -3. There is no way for a consumer to tell the buffer-API-exporting - object it is "finished" with its view of the memory and - therefore no way for the exporting object to be sure that it is - safe to reallocate the pointer to the memory that it owns (for - example, the array object reallocating its memory after sharing - it with the buffer object which held the original pointer led - to the infamous buffer-object problem). - -4. Memory is just a pointer with a length. There is no way to - describe what is "in" the memory (float, int, C-structure, etc.) - -5. There is no shape information provided for the memory. But, - several array-like Python types could make use of a standard - way to describe the shape-interpretation of the memory - (wxPython, GTK, pyQT, CVXOPT, PyVox, Audio and Video - Libraries, ctypes, NumPy, data-base interfaces, etc.) - -6. There is no way to share discontiguous memory (except through - the sequence of segments notion). - - There are two widely used libraries that use the concept of - discontiguous memory: PIL and NumPy. Their view of discontiguous - arrays is different, though. The proposed buffer interface allows - sharing of either memory model. Exporters will use only one - approach and consumers may choose to support discontiguous - arrays of each type however they choose. - - NumPy uses the notion of constant striding in each dimension as its - basic concept of an array. With this concept, a simple sub-region - of a larger array can be described without copying the data. - Thus, stride information is the additional information that must be - shared. - - The PIL uses a more opaque memory representation. Sometimes an - image is contained in a contiguous segment of memory, but sometimes - it is contained in an array of pointers to the contiguous segments - (usually lines) of the image. The PIL is where the idea of multiple - buffer segments in the original buffer interface came from. - - NumPy's strided memory model is used more often in computational - libraries and because it is so simple it makes sense to support - memory sharing using this model. The PIL memory model is sometimes - used in C-code where a 2-d array can be then accessed using double - pointer indirection: e.g. image[i][j]. - - The buffer interface should allow the object to export either of these - memory models. Consumers are free to either require contiguous memory - or write code to handle one or both of these memory models. - -Proposal Overview -================= - -* Eliminate the char-buffer and multiple-segment sections of the - buffer-protocol. - -* Unify the read/write versions of getting the buffer. - -* Add a new function to the interface that should be called when - the consumer object is "done" with the memory area. 
- -* Add a new variable to allow the interface to describe what is in - memory (unifying what is currently done now in struct and - array) - -* Add a new variable to allow the protocol to share shape information - -* Add a new variable for sharing stride information - -* Add a new mechanism for sharing arrays that must - be accessed using pointer indirection. - -* Fix all objects in the core and the standard library to conform - to the new interface - -* Extend the struct module to handle more format specifiers - -* Extend the buffer object into a new memory object which places - a Python veneer around the buffer interface. - -* Add a few functions to make it easy to copy contiguous data - in and out of object supporting the buffer interface. - -Specification -============= - -While the new specification allows for complicated memory sharing. -Simple contiguous buffers of bytes can still be obtained from an -object. In fact, the new protocol allows a standard mechanism for -doing this even if the original object is not represented as a -contiguous chunk of memory. - -The easiest way to obtain a simple contiguous chunk of memory is -to use the provided C-API to obtain a chunk of memory. - - -Change the PyBufferProcs structure to - -:: - - typedef struct { - getbufferproc bf_getbuffer; - releasebufferproc bf_releasebuffer; - } - - -:: - - typedef int (*getbufferproc)(PyObject *obj, PyBuffer *view, int flags) - -This function returns 0 on success and -1 on failure (and raises an -error). The first variable is the "exporting" object. The second -argument is the address to a bufferinfo structure. If view is NULL, -then no information is returned but a lock on the memory is still -obtained. In this case, the corresponding releasebuffer should also -be called with NULL. - -The third argument indicates what kind of buffer the exporter is -allowed to return. It essentially tells the exporter what kind of -memory area the consumer can deal with. It also indicates what -members of the PyBuffer structure the consumer is going to care about. - -The exporter can use this information to simplify how much of the PyBuffer -structure is filled in and/or raise an error if the object can't support -a simpler view of its memory. - -Thus, the caller can request a simple "view" and either receive it or -have an error raised if it is not possible. - -All of the following assume that at least buf, len, and readonly -will always be utilized by the caller. - -Py_BUF_SIMPLE - - The returned buffer will be assumed to be readable (the object may - or may not have writeable memory). Only the buf, len, and readonly - variables may be accessed. The format will be assumed to be - unsigned bytes . This is a "stand-alone" flag constant. It never - needs to be \|'d to the others. The exporter will raise an - error if it cannot provide such a contiguous buffer. - -Py_BUF_WRITEABLE - - The returned buffer must be writeable. If it is not writeable, - then raise an error. - -Py_BUF_READONLY - - The returned buffer must be readonly. If the object is already - read-only or it can make its memory read-only (and there are no - other views on the object) then it should do so and return the - buffer information. If the object does not have read-only memory - (or cannot make it read-only), then an error should be raised. - -Py_BUF_FORMAT - - The returned buffer must have true format information. This would - be used when the consumer is going to be checking for what 'kind' - of data is actually stored. 
An exporter should always be able - to provide this information if requested. - -Py_BUF_SHAPE - - The returned buffer must have shape information. The memory will - be assumed C-style contiguous (last dimension varies the fastest). - The exporter may raise an error if it cannot provide this kind - of contiguous buffer. - -Py_BUF_STRIDES (implies Py_BUF_SHAPE) - - The returned buffer must have strides information. This would be - used when the consumer can handle strided, discontiguous arrays. - Handling strides automatically assumes you can handle shape. - The exporter may raise an error if cannot provide a strided-only - representation of the data (i.e. without the suboffsets). - -Py_BUF_OFFSETS (implies Py_BUF_STRIDES) - - The returned buffer must have suboffsets information. This would - be used when the consumer can handle indirect array referencing - implied by these suboffsets. - -Py_BUF_FULL (Py_BUF_OFFSETS | Py_BUF_WRITEABLE | Py_BUF_FORMAT) - -Thus, the consumer simply wanting a contiguous chunk of bytes from -the object would use Py_BUF_SIMPLE, while a consumer that understands -how to make use of the most complicated cases could use Py_BUF_INDIRECT. - -If format information is going to be probed, then Py_BUF_FORMAT must -be \|'d to the flags otherwise the consumer assumes it is unsigned -bytes. - -There is a C-API that simple exporting objects can use to fill-in the -buffer info structure correctly according to the provided flags if a -contiguous chunk of "unsigned bytes" is all that can be exported. - - -The bufferinfo structure is:: - - struct bufferinfo { - void *buf; - Py_ssize_t len; - int readonly; - const char *format; - int ndims; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - int itemsize; - void *internal; - } PyBuffer; - -Before calling this function, the bufferinfo structure can be filled -with whatever. Upon return from getbufferproc, the bufferinfo -structure is filled in with relevant information about the buffer. -This same bufferinfo structure must be passed to bf_releasebuffer (if -available) when the consumer is done with the memory. The caller is -responsible for keeping a reference to obj until releasebuffer is -called (i.e. this call does not alter the reference count of obj). - -The members of the bufferinfo structure are: - -buf - a pointer to the start of the memory for the object - -len - the total bytes of memory the object uses. This should be the - same as the product of the shape array multiplied by the number of - bytes per item of memory. - -readonly - an integer variable to hold whether or not the memory is - readonly. 1 means the memory is readonly, zero means the - memory is writeable. - -format - a NULL-terminated format-string (following the struct-style syntax - including extensions) indicating what is in each element of - memory. The number of elements is len / itemsize, where itemsize - is the number of bytes implied by the format. For standard - unsigned bytes use a format string of "B". - -ndims - a variable storing the number of dimensions the memory represents. - Must be >=0. - -shape - an array of ``Py_ssize_t`` of length ``ndims`` indicating the - shape of the memory as an N-D array. Note that ``((*shape)[0] * - ... * (*shape)[ndims-1])*itemsize = len``. If ndims is 0 (indicating - a scalar), then this must be NULL. - -strides - address of a ``Py_ssize_t*`` variable that will be filled with a - pointer to an array of ``Py_ssize_t`` of length ``ndims`` (or NULL - if ndims is 0). 
indicating the number of bytes to skip to get to - the next element in each dimension. If this is not requested by - the caller (BUF_STRIDES is not set), then this member of the - structure will not be used and the consumer is assuming the array - is C-style contiguous. If this is not the case, then an error - should be raised. If this member is requested by the caller - (BUF_STRIDES is set), then it must be filled in. - - -suboffsets - address of a ``Py_ssize_t *`` variable that will be filled with a - pointer to an array of ``Py_ssize_t`` of length ``*ndims``. If - these suboffset numbers are >=0, then the value stored along the - indicated dimension is a pointer and the suboffset value dictates - how many bytes to add to the pointer after de-referencing. A - suboffset value that it negative indicates that no de-referencing - should occur (striding in a contiguous memory block). If all - suboffsets are negative (i.e. no de-referencing is needed, then - this must be NULL. - - For clarity, here is a function that returns a pointer to the - element in an N-D array pointed to by an N-dimesional index when - there are both strides and suboffsets.:: - - void* get_item_pointer(int ndim, void* buf, Py_ssize_t* strides, - Py_ssize_t* suboffsets, Py_ssize_t *indices) { - char* pointer = (char*)buf; - int i; - for (i = 0; i < ndim; i++) { - pointer += strides[i]*indices[i]; - if (suboffsets[i] >=0 ) { - pointer = *((char**)pointer) + suboffsets[i]; - } - } - return (void*)pointer; - } - - Notice the suboffset is added "after" the dereferencing occurs. - Thus slicing in the ith dimension would add to the suboffsets in - the (i-1)st dimension. Slicing in the first dimension would change - the location of the starting pointer directly (i.e. buf would - be modified). - -itemsize - This is a storage for the itemsize of each element of the shared - memory. It can be obtained using PyBuffer_SizeFromFormat but an - exporter may know it without making this call and thus storing it - is more convenient and faster. - -internal - This is for use internally by the exporting object. For example, - this might be re-cast as an integer by the exporter and used to - store flags about whether or not the shape, strides, and suboffsets - arrays must be freed when the buffer is released. The consumer - should never touch this value. - - -The exporter is responsible for making sure the memory pointed to by -buf, format, shape, strides, and suboffsets is valid until -releasebuffer is called. If the exporter wants to be able to change -shape, strides, and/or suboffsets before releasebuffer is called then -it should allocate those arrays when getbuffer is called (pointing to -them in the buffer-info structure provided) and free them when -releasebuffer is called. - - -The same bufferinfo struct should be used in the release-buffer -interface call. The caller is responsible for the memory of the -bufferinfo structure itself. - -``typedef int (*releasebufferproc)(PyObject *obj, PyBuffer *view)`` - Callers of getbufferproc must make sure that this function is - called when memory previously acquired from the object is no - longer needed. The exporter of the interface must make sure that - any memory pointed to in the bufferinfo structure remains valid - until releasebuffer is called. - - Both of these routines are optional for a type object - - If the releasebuffer function is not provided then it does not ever - need to be called. 
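To make the shape/strides/itemsize relationships above concrete, here is a small Python sketch (the function names are invented for illustration) that builds the byte strides of a contiguous block, in the spirit of the PyBuffer_FillContiguousStrides helper proposed below, and computes a byte offset the way the strides-only case of get_item_pointer() does:

    def contiguous_strides(shape, itemsize, fortran=False):
        # Byte strides of a contiguous block: the last dimension varies
        # fastest for C order, the first for Fortran order.
        strides = [0] * len(shape)
        step = itemsize
        order = range(len(shape)) if fortran else range(len(shape) - 1, -1, -1)
        for i in order:
            strides[i] = step
            step *= shape[i]
        return strides

    def byte_offset(indices, strides):
        # Strides-only addressing: no suboffset dereferencing.
        return sum(i * s for i, s in zip(indices, strides))

For a 3x4 array of 8-byte elements, contiguous_strides((3, 4), 8) gives [32, 8],
byte_offset((1, 2), [32, 8]) gives 48, and the len member must be 3*4*8 = 96 bytes.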
- -Exporters will need to define a releasebuffer function if they can -re-allocate their memory, strides, shape, suboffsets, or format -variables which they might share through the struct bufferinfo. -Several mechanisms could be used to keep track of how many getbuffer -calls have been made and shared. Either a single variable could be -used to keep track of how many "views" have been exported, or a -linked-list of bufferinfo structures filled in could be maintained in -each object. - -All that is specifically required by the exporter, however, is to -ensure that any memory shared through the bufferinfo structure remains -valid until releasebuffer is called on the bufferinfo structure. - - -New C-API calls are proposed -============================ - -:: - - int PyObject_CheckBuffer(PyObject *obj) - -Return 1 if the getbuffer function is available otherwise 0. - -:: - - int PyObject_GetBuffer(PyObject *obj, PyBuffer *view, - int flags) - -This is a C-API version of the getbuffer function call. It checks to -make sure object has the required function pointer and issues the -call. Returns -1 and raises an error on failure and returns 0 on -success. - -:: - - int PyObject_ReleaseBuffer(PyObject *obj, PyBuffer *view) - -This is a C-API version of the releasebuffer function call. It checks -to make sure the object has the required function pointer and issues -the call. Returns 0 on success and -1 (with an error raised) on -failure. This function always succeeds if there is no releasebuffer -function for the object. - -:: - - PyObject *PyObject_GetMemoryView(PyObject *obj) - -Return a memory-view object from an object that defines the buffer interface. - -A memory-view object is an extended buffer object that could replace -the buffer object (but doesn't have to). It's C-structure is - -:: - - typedef struct { - PyObject_HEAD - PyObject *base; - int ndims; - Py_ssize_t *starts; /* slice starts */ - Py_ssize_t *stops; /* slice stops */ - Py_ssize_t *steps; /* slice steps */ - } PyMemoryViewObject; - -This is functionally similar to the current buffer object except only -a reference to base is kept. The actual memory for base must be -re-grabbed using the buffer-protocol, whenever it is needed. - -The getbuffer and releasebuffer for this object use the underlying -base object (adjusted using the slice information). If the number of -dimensions of the base object (or the strides or the size) has changed -when a new view is requested, then the getbuffer will trigger an error. - -This memory-view object will support mult-dimensional slicing. Slices -of the memory-view object are other memory-view objects. When an -"element" from the memory-view is returned it is always a tuple of -bytes object + format string which can then be interpreted using the -struct module if desired. - -:: - - int PyBuffer_SizeFromFormat(const char *) - -Return the implied itemsize of the data-format area from a struct-style -description. - -:: - - int PyObject_GetContiguous(PyObject *obj, void **buf, Py_ssize_t *len, - char **format, char fortran) - -Return a contiguous chunk of memory representing the buffer. If a -copy is made then return 1. If no copy was needed return 0. If an -error occurred in probing the buffer interface, then return -1. The -contiguous chunk of memory is pointed to by ``*buf`` and the length of -that memory is ``*len``. If the object is multi-dimensional, then if -fortran is 'F', the first dimension of the underlying array will vary -the fastest in the buffer. 
If fortran is 'C', then the last dimension -will vary the fastest (C-style contiguous). If fortran is 'A', then it -does not matter and you will get whatever the object decides is more -efficient. - -:: - - int PyObject_CopyToObject(PyObject *obj, void *buf, Py_ssize_t len, - char fortran) - -Copy ``len`` bytes of data pointed to by the contiguous chunk of -memory pointed to by ``buf`` into the buffer exported by obj. Return -0 on success and return -1 and raise an error on failure. If the -object does not have a writeable buffer, then an error is raised. If -fortran is 'F', then if the object is multi-dimensional, then the data -will be copied into the array in Fortran-style (first dimension varies -the fastest). If fortran is 'C', then the data will be copied into the -array in C-style (last dimension varies the fastest). If fortran is 'A', then -it does not matter and the copy will be made in whatever way is more -efficient. - -:: - - void PyBuffer_FreeMem(void *buf) - -This function frees the memory returned by PyObject_GetContiguous if a -copy was made. Do not call this function unless -PyObject_GetContiguous returns a 1 indicating that new memory was -created. - - -These last three C-API calls allow a standard way of getting data in and -out of Python objects into contiguous memory areas no matter how it is -actually stored. These calls use the extended buffer interface to perform -their work. - -:: - - int PyBuffer_IsContiguous(PyBuffer *view, char fortran); - -Return 1 if the memory defined by the view object is C-style (fortran = 'C') -or Fortran-style (fortran = 'A') contiguous. Return 0 otherwise. - -:: - - void PyBuffer_FillContiguousStrides(int *ndims, Py_ssize_t *shape, - int itemsize, - Py_ssize_t *strides, char fortran) - -Fill the strides array with byte-strides of a contiguous (C-style if -fortran is 0 or Fortran-style if fortran is 1) array of the given -shape with the given number of bytes per element. - -:: - - int PyBuffer_FillInfo(PyBuffer *view, void *buf, - Py_ssize_t len, int readonly, int infoflags) - -Fills in a buffer-info structure correctly for an exporter that can -only share a contiguous chunk of memory of "unsigned bytes" of the -given length. Returns 0 on success and -1 (with raising an error) on -error. - - -Additions to the struct string-syntax -===================================== - -The struct string-syntax is missing some characters to fully -implement data-format descriptions already available elsewhere (in -ctypes and NumPy for example). The Python 2.5 specification is -at http://docs.python.org/lib/module-struct.html - -Here are the proposed additions: - - -================ =========== -Character Description -================ =========== -'t' bit (number before states how many bits) -'?' platform _Bool type -'g' long double -'c' ucs-1 (latin-1) encoding -'u' ucs-2 -'w' ucs-4 -'O' pointer to Python Object -'Z' complex (whatever the next specifier is) -'&' specific pointer (prefix before another charater) -'T{}' structure (detailed layout inside {}) -'(k1,k2,...,kn)' multi-dimensional array of whatever follows -':name:' optional name of the preceeding element -'X{}' pointer to a function (optional function - signature inside {}) -' \n\t' ignored (allow better readability) - -- this may already be true -================ =========== - -The struct module will be changed to understand these as well and -return appropriate Python objects on unpacking. Unpacking a -long-double will return a decimal object or a ctypes long-double. 
-Unpacking 'u' or 'w' will return Python unicode. Unpacking a -multi-dimensional array will return a list (of lists if >1d). -Unpacking a pointer will return a ctypes pointer object. Unpacking a -function pointer will return a ctypes call-object (perhaps). Unpacking -a bit will return a Python Bool. White-space in the struct-string -syntax will be ignored if it isn't already. Unpacking a named-object -will return some kind of named-tuple-like object that acts like a -tuple but whose entries can also be accessed by name. Unpacking a -nested structure will return a nested tuple. - -Endian-specification ('!', '@','=','>','<', '^') is also allowed -inside the string so that it can change if needed. The -previously-specified endian string is in force until changed. The -default endian is '@' which means native data-types and alignment. If -un-aligned, native data-types are requested, then the endian -specification is '^'. - -According to the struct-module, a number can preceed a character -code to specify how many of that type there are. The -(k1,k2,...,kn) extension also allows specifying if the data is -supposed to be viewed as a (C-style contiguous, last-dimension -varies the fastest) multi-dimensional array of a particular format. - -Functions should be added to ctypes to create a ctypes object from -a struct description, and add long-double, and ucs-2 to ctypes. - -Examples of Data-Format Descriptions -==================================== - -Here are some examples of C-structures and how they would be -represented using the struct-style syntax. - - is the constructor for a named-tuple (not-specified yet). - -float - 'f' <--> Python float -complex double - 'Zd' <--> Python complex -RGB Pixel data - 'BBB' <--> (int, int, int) - 'B:r: B:g: B:b:' <--> ((int, int, int), ('r','g','b')) - -Mixed endian (weird but possible) - '>i:big: ((int, int), ('big', 'little')) - -Nested structure - :: - - struct { - int ival; - struct { - unsigned short sval; - unsigned char bval; - unsigned char cval; - } sub; - } - """i:ival: - T{ - H:sval: - B:bval: - B:cval: - }:sub: - """ -Nested array - :: - - struct { - int ival; - double data[16*4]; - } - """i:ival: - (16,4)d:data: - """ - - -Code to be affected -=================== - -All objects and modules in Python that export or consume the old -buffer interface will be modified. Here is a partial list. - -* buffer object -* bytes object -* string object -* array module -* struct module -* mmap module -* ctypes module - -Anything else using the buffer API. - - -Issues and Details -================== - -It is intended that this PEP will be back-ported to Python 2.6 by -adding the C-API and the two functions to the existing buffer -protocol. - -The proposed locking mechanism relies entirely on the exporter object -to not invalidate any of the memory pointed to by the buffer structure -until a corresponding releasebuffer is called. If it wants to be able -to change its own shape and/or strides arrays, then it needs to create -memory for these in the bufferinfo structure and copy information -over. - -The sharing of strided memory and suboffsets is new and can be seen as -a modification of the multiple-segment interface. It is motivated by -NumPy and the PIL. NumPy objects should be able to share their -strided memory with code that understands how to manage strided memory -because strided memory is very common when interfacing with compute -libraries. - -Also, with this approach it should be possible to write generic code -that works with both kinds of memory. 
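As a rough illustration of those two memory models (the names and layouts below are invented for this sketch, not part of the proposal), the same 2-d data can be exposed either as one strided block or as a sequence of per-line segments, which is exactly the case the suboffsets mechanism is meant to cover:

    ROWS, COLS, ITEMSIZE = 2, 3, 1

    # Strided model (NumPy-like): one buffer plus byte strides.
    strided = bytearray(range(ROWS * COLS))
    strides = (COLS * ITEMSIZE, ITEMSIZE)

    # Indirect model (PIL-like): a separately allocated segment per line,
    # reached through one level of pointer indirection (suboffset >= 0).
    lines = [bytearray(range(r * COLS, (r + 1) * COLS)) for r in range(ROWS)]

    def get_strided(buf, i, j):
        off = i * strides[0] + j * strides[1]
        return buf[off]

    def get_indirect(rows, i, j):
        return rows[i][j * ITEMSIZE]

    assert get_strided(strided, 1, 2) == get_indirect(lines, 1, 2) == 5

A consumer that only understands the strided model would request the buffer
without Py_BUF_OFFSETS and let the exporter raise an error if it cannot provide
a strided-only view.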
- -Memory management of the format string, the shape array, the strides -array, and the suboffsets array in the bufferinfo structure is always -the responsibility of the exporting object. The consumer should not -set these pointers to any other memory or try to free them. - -Several ideas were discussed and rejected: - - Having a "releaser" object whose release-buffer was called. This - was deemed unacceptable because it caused the protocol to be - asymmetric (you called release on something different than you - "got" the buffer from). It also complicated the protocol without - providing a real benefit. - - Passing all the struct variables separately into the function. - This had the advantage that it allowed one to set NULL to - variables that were not of interest, but it also made the function - call more difficult. The flags variable allows the same - ability of consumers to be "simple" in how they call the protocol. - -Code -======== - -The authors of the PEP promise to contribute and maintain the code for -this proposal but will welcome any help. - - - - -Examples -========= - -Ex. 1 ------------ - -This example shows how an image object that uses contiguous lines might expose its buffer. - -:: - - struct rgba { - unsigned char r, g, b, a; - }; - - struct ImageObject { - PyObject_HEAD; - ... - struct rgba** lines; - Py_ssize_t height; - Py_ssize_t width; - Py_ssize_t shape_array[2]; - Py_ssize_t stride_array[2]; - Py_ssize_t view_count; - }; - -"lines" points to malloced 1-D array of (struct rgba*). Each pointer -in THAT block points to a seperately malloced array of (struct rgba). - -In order to access, say, the red value of the pixel at x=30, y=50, you'd use "lines[50][30].r". - -So what does ImageObject's getbuffer do? Leaving error checking out:: - - int Image_getbuffer(PyObject *self, PyBuffer *view, int flags) { - - static Py_ssize_t suboffsets[2] = { -1, 0 }; - - view->buf = self->lines; - view->len = self->height*self->width; - view->readonly = 0; - view->ndims = 2; - self->shape_array[0] = height; - self->shape_array[1] = width; - view->shape = &self->shape_array; - self->stride_array[0] = sizeof(struct rgba*); - self->stride_array[1] = sizeof(struct rgba); - view->strides = &self->stride_array; - view->suboffsets = suboffsets; - - self->view_count ++; - - return 0; - } - - - int Image_releasebuffer(PyObject *self, PyBuffer *view) { - self->view_count--; - return 0; - } - - -Ex. 2 ------------ - -This example shows how an object that wants to expose a contiguous -chunk of memory (which will never be re-allocated while the object is -alive) would do that. - -:: - - int myobject_getbuffer(PyObject *self, PyBuffer *view, int flags) { - - void *buf; - Py_ssize_t len; - int readonly=0; - - buf = /* Point to buffer */ - len = /* Set to size of buffer */ - readonly = /* Set to 1 if readonly */ - - return PyObject_FillBufferInfo(view, buf, len, readonly, flags); - } - -No releasebuffer is necessary because the memory will never -be re-allocated so the locking mechanism is not needed. - -Ex. 3 ------------ - -A consumer that wants to only get a simple contiguous chunk of bytes -from a Python object, obj would do the following: - -:: - - PyBuffer view; - int ret; - - if (PyObject_GetBuffer(obj, &view, Py_BUF_SIMPLE) < 0) { - /* error return */ - } - - /* Now, view.buf is the pointer to memory - view.len is the length - view.readonly is whether or not the memory is read-only. 
- */ - - - /* After using the information and you don't need it anymore */ - - if (PyObject_ReleaseBuffer(obj, &view) < 0) { - /* error return */ - } - - -Ex. 4 ------------ - -A consumer that wants to be able to use any object's memory but is -writing an algorithm that only handle contiguous memory could do the following: - -:: - - void *buf; - Py_ssize_t len; - char *format; - - if (PyObject_GetContiguous(obj, &buf, &len, &format, 0) < 0) { - /* error return */ - } - - /* process memory pointed to by buffer if format is correct */ - - /* Optional: - - if, after processing, we want to copy data from buffer back - into the the object - - we could do - */ - - if (PyObject_CopyToObject(obj, buf, len, 0) < 0) { - /* error return */ - } - - -Copyright -========= - -This PEP is placed in the public domain diff --git a/numpy/doc/performance.py b/numpy/doc/performance.py new file mode 100644 index 000000000..1429e232f --- /dev/null +++ b/numpy/doc/performance.py @@ -0,0 +1,9 @@ +""" + +=========== +Performance +=========== + +Placeholder for Improving Performance documentation. + +""" diff --git a/numpy/doc/pyrex/MANIFEST b/numpy/doc/pyrex/MANIFEST deleted file mode 100644 index feb3ec22a..000000000 --- a/numpy/doc/pyrex/MANIFEST +++ /dev/null @@ -1,2 +0,0 @@ -numpyx.pyx -setup.py diff --git a/numpy/doc/pyrex/Makefile b/numpy/doc/pyrex/Makefile deleted file mode 100644 index b5905e7be..000000000 --- a/numpy/doc/pyrex/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -all: - python setup.py build_ext --inplace - -test: all - python run_test.py - -.PHONY: clean -clean: - rm -rf *~ *.so *.c *.o build diff --git a/numpy/doc/pyrex/README.txt b/numpy/doc/pyrex/README.txt deleted file mode 100644 index 9df1e6c8e..000000000 --- a/numpy/doc/pyrex/README.txt +++ /dev/null @@ -1,3 +0,0 @@ -WARNING: this code is deprecated and slated for removal soon. See the -doc/cython directory for the replacement, which uses Cython (the actively -maintained version of Pyrex). diff --git a/numpy/doc/pyrex/c_numpy.pxd b/numpy/doc/pyrex/c_numpy.pxd deleted file mode 100644 index 511acc4b1..000000000 --- a/numpy/doc/pyrex/c_numpy.pxd +++ /dev/null @@ -1,125 +0,0 @@ -# :Author: Travis Oliphant - -cdef extern from "numpy/arrayobject.h": - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - cdef enum requirements: - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum defines: - # Note: as of Pyrex 0.9.5, enums are type-checked more strictly, so this - # can't be used as an integer. 
- NPY_MAXDIMS - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef int npy_intp - - ctypedef extern class numpy.dtype [object PyArray_Descr]: - cdef int type_num, elsize, alignment - cdef char type, kind, byteorder, hasobject - cdef object fields, typeobj - - ctypedef extern class numpy.ndarray [object PyArrayObject]: - cdef char *data - cdef int nd - cdef npy_intp *dimensions - cdef npy_intp *strides - cdef object base - cdef dtype descr - cdef int flags - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - cdef int nd_m1 - cdef npy_intp index, size - cdef ndarray ao - cdef char *dataptr - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - # These next two should be arrays of [NPY_MAXITER], but that is - # difficult to cleanly specify in Pyrex. Fortunately, it doesn't matter. - cdef npy_intp *dimensions - cdef void **iters - - object PyArray_ZEROS(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - object PyArray_EMPTY(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - dtype PyArray_DescrFromTypeNum(NPY_TYPES type_num) - object PyArray_SimpleNew(int ndims, npy_intp* dims, NPY_TYPES type_num) - int PyArray_Check(object obj) - object PyArray_ContiguousFromAny(object obj, NPY_TYPES type, - int mindim, int maxdim) - npy_intp PyArray_SIZE(ndarray arr) - npy_intp PyArray_NBYTES(ndarray arr) - void *PyArray_DATA(ndarray arr) - object PyArray_FromAny(object obj, dtype newtype, int mindim, int maxdim, - int requirements, object context) - object PyArray_FROMANY(object obj, NPY_TYPES type_num, int min, - int max, int requirements) - object PyArray_NewFromDescr(object subtype, dtype newtype, int nd, - npy_intp* dims, npy_intp* strides, void* data, - int flags, object parent) - - void PyArray_ITER_NEXT(flatiter it) - - void import_array() diff --git a/numpy/doc/pyrex/c_python.pxd b/numpy/doc/pyrex/c_python.pxd deleted file mode 100644 index 53f6d9b19..000000000 --- a/numpy/doc/pyrex/c_python.pxd +++ /dev/null @@ -1,20 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough - -# Expose as much of the Python C API as we need here - -cdef extern from "stdlib.h": - ctypedef int size_t - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - void* PyMem_Malloc(size_t) - void* PyMem_Realloc(void *p, size_t n) - void PyMem_Free(void *p) - char* PyString_AsString(object string) - object PyString_FromString(char *v) - object PyString_InternFromString(char *v) - int PyErr_CheckSignals() - object PyFloat_FromDouble(double v) - void Py_XINCREF(object o) - void Py_XDECREF(object o) - void Py_CLEAR(object o) # use instead of decref diff --git a/numpy/doc/pyrex/notes b/numpy/doc/pyrex/notes deleted file mode 100644 index 301581cee..000000000 --- a/numpy/doc/pyrex/notes +++ /dev/null @@ -1,3 +0,0 @@ -- cimport with a .pxd file vs 'include foo.pxi'? - -- the need to repeat: pyrex does NOT parse C headers. 
\ No newline at end of file diff --git a/numpy/doc/pyrex/numpyx.c b/numpy/doc/pyrex/numpyx.c deleted file mode 100644 index e250eae19..000000000 --- a/numpy/doc/pyrex/numpyx.c +++ /dev/null @@ -1,1037 +0,0 @@ -/* Generated by Pyrex 0.9.5.1 on Wed Jan 31 11:57:10 2007 */ - -#include "Python.h" -#include "structmember.h" -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifdef __cplusplus -#define __PYX_EXTERN_C extern "C" -#else -#define __PYX_EXTERN_C extern -#endif -__PYX_EXTERN_C double pow(double, double); -#include "stdlib.h" -#include "numpy/arrayobject.h" - - -typedef struct {PyObject **p; char *s;} __Pyx_InternTabEntry; /*proto*/ -typedef struct {PyObject **p; char *s; long n;} __Pyx_StringTabEntry; /*proto*/ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static int __pyx_lineno; -static char *__pyx_filename; -static char **__pyx_f; - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name); /*proto*/ - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/ - -static int __Pyx_PrintItem(PyObject *); /*proto*/ -static int __Pyx_PrintNewline(void); /*proto*/ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static int __Pyx_InternStrings(__Pyx_InternTabEntry *t); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, long size); /*proto*/ - -static void __Pyx_AddTraceback(char *funcname); /*proto*/ - -/* Declarations from c_python */ - - -/* Declarations from c_numpy */ - -static PyTypeObject *__pyx_ptype_7c_numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_7c_numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_7c_numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_7c_numpy_broadcast = 0; - -/* Declarations from numpyx */ - -static PyObject *(__pyx_f_6numpyx_print_elements(char (*),Py_intptr_t (*),Py_intptr_t (*),int ,int ,PyObject *)); /*proto*/ - - -/* Implementation of numpyx */ - - -static PyObject *__pyx_n_c_python; -static PyObject *__pyx_n_c_numpy; -static PyObject *__pyx_n_numpy; -static PyObject *__pyx_n_print_array_info; -static PyObject *__pyx_n_test_methods; -static PyObject *__pyx_n_test; - -static PyObject *__pyx_n_dtype; - -static PyObject *__pyx_k2p; -static PyObject *__pyx_k3p; -static PyObject *__pyx_k4p; -static PyObject *__pyx_k5p; -static PyObject *__pyx_k6p; -static PyObject *__pyx_k7p; -static PyObject *__pyx_k8p; -static PyObject *__pyx_k9p; - -static char (__pyx_k2[]) = "-="; -static char (__pyx_k3[]) = "printing array info for ndarray at 0x%0lx"; -static char (__pyx_k4[]) = "print number of dimensions:"; -static char (__pyx_k5[]) = "address of strides: 0x%0lx"; -static char (__pyx_k6[]) = "strides:"; -static char (__pyx_k7[]) = " stride %d:"; -static char (__pyx_k8[]) = "memory dump:"; -static char (__pyx_k9[]) = "-="; - -static PyObject *__pyx_f_6numpyx_print_array_info(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_f_6numpyx_print_array_info(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_arr = 0; - int __pyx_v_i; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - int __pyx_3; - static char *__pyx_argnames[] = {"arr",0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_arr)) return 0; - Py_INCREF(__pyx_v_arr); - if (!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_arr), 
__pyx_ptype_7c_numpy_ndarray, 1, "arr")) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":13 */ - __pyx_1 = PyInt_FromLong(10); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - __pyx_2 = PyNumber_Multiply(__pyx_k2p, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":14 */ - __pyx_1 = PyInt_FromLong(((int )__pyx_v_arr)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyNumber_Remainder(__pyx_k3p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":15 */ - if (__Pyx_PrintItem(__pyx_k4p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(__pyx_v_arr->nd); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":16 */ - __pyx_1 = PyInt_FromLong(((int )__pyx_v_arr->strides)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyNumber_Remainder(__pyx_k5p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":17 */ - if (__Pyx_PrintItem(__pyx_k6p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;} - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":18 */ - __pyx_3 = __pyx_v_arr->nd; - for (__pyx_v_i = 0; __pyx_v_i < __pyx_3; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":20 */ - __pyx_2 = PyInt_FromLong(__pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - __pyx_1 = PyNumber_Remainder(__pyx_k7p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = PyInt_FromLong(((int )(__pyx_v_arr->strides[__pyx_v_i]))); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - } - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":21 */ - if (__Pyx_PrintItem(__pyx_k8p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;} - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":22 */ - __pyx_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; goto __pyx_L1;} - __pyx_2 = __pyx_f_6numpyx_print_elements(__pyx_v_arr->data,__pyx_v_arr->strides,__pyx_v_arr->dimensions,__pyx_v_arr->nd,(sizeof(double )),__pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":24 */ - __pyx_1 = PyInt_FromLong(10); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - __pyx_2 = PyNumber_Multiply(__pyx_k9p, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":25 */ - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; goto __pyx_L1;} - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - __Pyx_AddTraceback("numpyx.print_array_info"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static PyObject *__pyx_n_object_; -static PyObject *__pyx_n_float64; -static PyObject *__pyx_n_name; - -static PyObject *__pyx_k10p; -static PyObject *__pyx_k11p; -static PyObject *__pyx_k12p; -static PyObject *__pyx_k13p; -static PyObject *__pyx_k14p; - -static char (__pyx_k10[]) = " print_elements() not (yet) implemented for dtype %s"; -static char (__pyx_k11[]) = " "; -static char (__pyx_k12[]) = " "; -static char (__pyx_k13[]) = " "; -static char (__pyx_k14[]) = " "; - -static PyObject *__pyx_f_6numpyx_print_elements(char (*__pyx_v_data),Py_intptr_t (*__pyx_v_strides),Py_intptr_t (*__pyx_v_dimensions),int __pyx_v_nd,int __pyx_v_elsize,PyObject *__pyx_v_dtype) { - Py_intptr_t __pyx_v_i; - void (*__pyx_v_elptr); - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - PyObject *__pyx_3 = 0; - PyObject *__pyx_4 = 0; - int __pyx_5; - Py_intptr_t __pyx_6; - Py_INCREF(__pyx_v_dtype); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":36 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = __Pyx_GetName(__pyx_m, 
__pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_4); - __pyx_4 = 0; - __pyx_4 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyList_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_1, 0, __pyx_3); - PyList_SET_ITEM(__pyx_1, 1, __pyx_4); - __pyx_3 = 0; - __pyx_4 = 0; - __pyx_5 = PySequence_Contains(__pyx_1, __pyx_v_dtype); if (__pyx_5 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_5 = !__pyx_5; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":38 */ - __pyx_2 = PyObject_GetAttr(__pyx_v_dtype, __pyx_n_name); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - __pyx_3 = PyNumber_Remainder(__pyx_k10p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":39 */ - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - goto __pyx_L2; - } - __pyx_L2:; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":41 */ - __pyx_5 = (__pyx_v_nd == 0); - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":42 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; 
- __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":43 */ - __pyx_v_elptr = (((void (*(*)))__pyx_v_data)[0]); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":44 */ - if (__Pyx_PrintItem(__pyx_k11p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - __pyx_3 = (PyObject *)__pyx_v_elptr; - Py_INCREF(__pyx_3); - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - goto __pyx_L4; - } - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":46 */ - if (__Pyx_PrintItem(__pyx_k12p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - __pyx_3 = PyFloat_FromDouble((((double (*))__pyx_v_data)[0])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - goto __pyx_L4; - } - __pyx_L4:; - goto __pyx_L3; - } - __pyx_5 = (__pyx_v_nd == 1); - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":48 */ - __pyx_6 = (__pyx_v_dimensions[0]); - for (__pyx_v_i = 0; __pyx_v_i < __pyx_6; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":49 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if 
(!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":50 */ - __pyx_v_elptr = (((void (*(*)))__pyx_v_data)[0]); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":51 */ - if (__Pyx_PrintItem(__pyx_k13p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - __pyx_3 = (PyObject *)__pyx_v_elptr; - Py_INCREF(__pyx_3); - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - goto __pyx_L7; - } - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":53 */ - if (__Pyx_PrintItem(__pyx_k14p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - __pyx_3 = PyFloat_FromDouble((((double (*))__pyx_v_data)[0])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":54 */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - goto __pyx_L3; - } - /*else*/ { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":56 */ - __pyx_6 = (__pyx_v_dimensions[0]); - for (__pyx_v_i = 0; __pyx_v_i < __pyx_6; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":57 */ - __pyx_4 = 
__pyx_f_6numpyx_print_elements(__pyx_v_data,(__pyx_v_strides + 1),(__pyx_v_dimensions + 1),(__pyx_v_nd - 1),__pyx_v_elsize,__pyx_v_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":58 */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - } - __pyx_L3:; - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - Py_XDECREF(__pyx_3); - Py_XDECREF(__pyx_4); - __Pyx_AddTraceback("numpyx.print_elements"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_dtype); - return __pyx_r; -} - -static PyObject *__pyx_n_any; - -static PyObject *__pyx_k15p; -static PyObject *__pyx_k16p; -static PyObject *__pyx_k17p; - -static char (__pyx_k15[]) = "arr.any() :"; -static char (__pyx_k16[]) = "arr.nd :"; -static char (__pyx_k17[]) = "arr.flags :"; - -static PyObject *__pyx_f_6numpyx_test_methods(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6numpyx_test_methods[] = "Test a few attribute accesses for an array.\n \n This illustrates how the pyrex-visible object is in practice a strange\n hybrid of the C PyArrayObject struct and the python object. Some\n properties (like .nd) are visible here but not in python, while others\n like flags behave very differently: in python flags appears as a separate,\n object while here we see the raw int holding the bit pattern.\n\n This makes sense when we think of how pyrex resolves arr.foo: if foo is\n listed as a field in the c_numpy.ndarray struct description, it will be\n directly accessed as a C variable without going through Python at all.\n This is why for arr.flags, we see the actual int which holds all the flags\n as bit fields. 
However, for any other attribute not listed in the struct,\n it simply forwards the attribute lookup to python at runtime, just like\n python would (which means that AttributeError can be raised for\n non-existent attributes, for example)."; -static PyObject *__pyx_f_6numpyx_test_methods(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_arr = 0; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - static char *__pyx_argnames[] = {"arr",0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_arr)) return 0; - Py_INCREF(__pyx_v_arr); - if (!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_arr), __pyx_ptype_7c_numpy_ndarray, 1, "arr")) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":78 */ - if (__Pyx_PrintItem(__pyx_k15p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_any); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":79 */ - if (__Pyx_PrintItem(__pyx_k16p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - __pyx_1 = PyInt_FromLong(__pyx_v_arr->nd); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":80 */ - if (__Pyx_PrintItem(__pyx_k17p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(__pyx_v_arr->flags); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - __Pyx_AddTraceback("numpyx.test_methods"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static PyObject *__pyx_n_array; -static PyObject *__pyx_n_arange; -static PyObject *__pyx_n_shape; -static PyObject *__pyx_n_one; -static PyObject *__pyx_n_two; - - -static PyObject *__pyx_f_6numpyx_test(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6numpyx_test[] = "this function is pure Python"; -static PyObject *__pyx_f_6numpyx_test(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_arr1; - PyObject *__pyx_v_arr2; - PyObject *__pyx_v_arr3; - PyObject *__pyx_v_four; - PyObject *__pyx_v_arr4; - PyObject *__pyx_v_arr5; - PyObject *__pyx_v_arr; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - PyObject *__pyx_3 = 0; - PyObject 
*__pyx_4 = 0; - PyObject *__pyx_5 = 0; - static char *__pyx_argnames[] = {0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0; - __pyx_v_arr1 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr2 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr3 = Py_None; Py_INCREF(Py_None); - __pyx_v_four = Py_None; Py_INCREF(Py_None); - __pyx_v_arr4 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr5 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr = Py_None; Py_INCREF(Py_None); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":84 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = PyFloat_FromDouble((-1e-30)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_float64); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyDict_SetItem(__pyx_1, __pyx_n_dtype, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_v_arr1); - __pyx_v_arr1 = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":85 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_5, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_3 = PyFloat_FromDouble(1.0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_1 = PyFloat_FromDouble(2.0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_4 = PyFloat_FromDouble(3.0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_5 = PyList_New(3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_5, 0, __pyx_3); - PyList_SET_ITEM(__pyx_5, 1, __pyx_1); - PyList_SET_ITEM(__pyx_5, 2, __pyx_4); - __pyx_3 = 0; - __pyx_1 = 0; - __pyx_4 = 0; - __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_3, 0, __pyx_5); - __pyx_5 = 0; - __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_float64); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - 
Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyDict_SetItem(__pyx_1, __pyx_n_dtype, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_v_arr2); - __pyx_v_arr2 = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":87 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_5, __pyx_n_arange); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_3 = PyInt_FromLong(9); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_4 = PyDict_New(); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - if (PyDict_SetItem(__pyx_4, __pyx_n_dtype, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - __pyx_5 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_1, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_v_arr3); - __pyx_v_arr3 = __pyx_5; - __pyx_5 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":88 */ - __pyx_3 = PyInt_FromLong(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - PyTuple_SET_ITEM(__pyx_1, 1, __pyx_2); - __pyx_3 = 0; - __pyx_2 = 0; - if (PyObject_SetAttr(__pyx_v_arr3, __pyx_n_shape, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":90 */ - __pyx_4 = PyInt_FromLong(4); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;} - Py_DECREF(__pyx_v_four); - __pyx_v_four = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":91 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_array); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_1 = PyList_New(4); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_INCREF(__pyx_n_one); 
- PyList_SET_ITEM(__pyx_1, 0, __pyx_n_one); - Py_INCREF(__pyx_n_two); - PyList_SET_ITEM(__pyx_1, 1, __pyx_n_two); - PyList_SET_ITEM(__pyx_1, 2, __pyx_2); - Py_INCREF(__pyx_v_four); - PyList_SET_ITEM(__pyx_1, 3, __pyx_v_four); - __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1); - __pyx_1 = 0; - __pyx_5 = PyDict_New(); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (PyDict_SetItem(__pyx_5, __pyx_n_dtype, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = PyEval_CallObjectWithKeywords(__pyx_3, __pyx_4, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_5); __pyx_5 = 0; - Py_DECREF(__pyx_v_arr4); - __pyx_v_arr4 = __pyx_2; - __pyx_2 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":93 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_4 = PyInt_FromLong(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_5 = PyInt_FromLong(2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_1 = PyList_New(3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_1, 0, __pyx_4); - PyList_SET_ITEM(__pyx_1, 1, __pyx_5); - PyList_SET_ITEM(__pyx_1, 2, __pyx_2); - __pyx_4 = 0; - __pyx_5 = 0; - __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1); - __pyx_1 = 0; - __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_v_arr5); - __pyx_v_arr5 = __pyx_5; - __pyx_5 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":95 */ - __pyx_2 = PyList_New(5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - Py_INCREF(__pyx_v_arr1); - PyList_SET_ITEM(__pyx_2, 0, __pyx_v_arr1); - Py_INCREF(__pyx_v_arr2); - PyList_SET_ITEM(__pyx_2, 1, __pyx_v_arr2); - Py_INCREF(__pyx_v_arr3); - PyList_SET_ITEM(__pyx_2, 2, __pyx_v_arr3); - Py_INCREF(__pyx_v_arr4); - PyList_SET_ITEM(__pyx_2, 3, __pyx_v_arr4); - Py_INCREF(__pyx_v_arr5); - PyList_SET_ITEM(__pyx_2, 4, __pyx_v_arr5); - __pyx_1 = PyObject_GetIter(__pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - for (;;) { - __pyx_3 = PyIter_Next(__pyx_1); - if (!__pyx_3) { - if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - break; - } - 
Py_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_3; - __pyx_3 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":96 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_print_array_info); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - Py_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_arr); - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_5); __pyx_5 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - } - Py_DECREF(__pyx_1); __pyx_1 = 0; - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - Py_XDECREF(__pyx_3); - Py_XDECREF(__pyx_4); - Py_XDECREF(__pyx_5); - __Pyx_AddTraceback("numpyx.test"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr1); - Py_DECREF(__pyx_v_arr2); - Py_DECREF(__pyx_v_arr3); - Py_DECREF(__pyx_v_four); - Py_DECREF(__pyx_v_arr4); - Py_DECREF(__pyx_v_arr5); - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static __Pyx_InternTabEntry __pyx_intern_tab[] = { - {&__pyx_n_any, "any"}, - {&__pyx_n_arange, "arange"}, - {&__pyx_n_array, "array"}, - {&__pyx_n_c_numpy, "c_numpy"}, - {&__pyx_n_c_python, "c_python"}, - {&__pyx_n_dtype, "dtype"}, - {&__pyx_n_float64, "float64"}, - {&__pyx_n_name, "name"}, - {&__pyx_n_numpy, "numpy"}, - {&__pyx_n_object_, "object_"}, - {&__pyx_n_one, "one"}, - {&__pyx_n_print_array_info, "print_array_info"}, - {&__pyx_n_shape, "shape"}, - {&__pyx_n_test, "test"}, - {&__pyx_n_test_methods, "test_methods"}, - {&__pyx_n_two, "two"}, - {0, 0} -}; - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_k2p, __pyx_k2, sizeof(__pyx_k2)}, - {&__pyx_k3p, __pyx_k3, sizeof(__pyx_k3)}, - {&__pyx_k4p, __pyx_k4, sizeof(__pyx_k4)}, - {&__pyx_k5p, __pyx_k5, sizeof(__pyx_k5)}, - {&__pyx_k6p, __pyx_k6, sizeof(__pyx_k6)}, - {&__pyx_k7p, __pyx_k7, sizeof(__pyx_k7)}, - {&__pyx_k8p, __pyx_k8, sizeof(__pyx_k8)}, - {&__pyx_k9p, __pyx_k9, sizeof(__pyx_k9)}, - {&__pyx_k10p, __pyx_k10, sizeof(__pyx_k10)}, - {&__pyx_k11p, __pyx_k11, sizeof(__pyx_k11)}, - {&__pyx_k12p, __pyx_k12, sizeof(__pyx_k12)}, - {&__pyx_k13p, __pyx_k13, sizeof(__pyx_k13)}, - {&__pyx_k14p, __pyx_k14, sizeof(__pyx_k14)}, - {&__pyx_k15p, __pyx_k15, sizeof(__pyx_k15)}, - {&__pyx_k16p, __pyx_k16, sizeof(__pyx_k16)}, - {&__pyx_k17p, __pyx_k17, sizeof(__pyx_k17)}, - {0, 0, 0} -}; - -static struct PyMethodDef __pyx_methods[] = { - {"print_array_info", (PyCFunction)__pyx_f_6numpyx_print_array_info, METH_VARARGS|METH_KEYWORDS, 0}, - {"test_methods", (PyCFunction)__pyx_f_6numpyx_test_methods, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6numpyx_test_methods}, - {"test", (PyCFunction)__pyx_f_6numpyx_test, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6numpyx_test}, - {0, 0, 0, 0} -}; - -static void __pyx_init_filenames(void); /*proto*/ - -PyMODINIT_FUNC initnumpyx(void); /*proto*/ -PyMODINIT_FUNC initnumpyx(void) { - PyObject *__pyx_1 = 0; - __pyx_init_filenames(); - __pyx_m = Py_InitModule4("numpyx", __pyx_methods, 0, 0, PYTHON_API_VERSION); - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - __pyx_b = PyImport_AddModule("__builtin__"); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - 
if (__Pyx_InternStrings(__pyx_intern_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - __pyx_ptype_7c_numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr)); if (!__pyx_ptype_7c_numpy_dtype) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 76; goto __pyx_L1;} - __pyx_ptype_7c_numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject)); if (!__pyx_ptype_7c_numpy_ndarray) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 81; goto __pyx_L1;} - __pyx_ptype_7c_numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject)); if (!__pyx_ptype_7c_numpy_flatiter) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 90; goto __pyx_L1;} - __pyx_ptype_7c_numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject)); if (!__pyx_ptype_7c_numpy_broadcast) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 96; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":5 */ - __pyx_1 = __Pyx_Import(__pyx_n_numpy, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; goto __pyx_L1;} - if (PyObject_SetAttr(__pyx_m, __pyx_n_numpy, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":8 */ - import_array(); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":82 */ - return; - __pyx_L1:; - Py_XDECREF(__pyx_1); - __Pyx_AddTraceback("numpyx"); -} - -static char *__pyx_filenames[] = { - "numpyx.pyx", - "c_numpy.pxd", -}; - -/* Runtime support code */ - -static void __pyx_init_filenames(void) { - __pyx_f = __pyx_filenames; -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name) { - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if ((none_allowed && obj == Py_None) || PyObject_TypeCheck(obj, type)) - return 1; - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, obj->ob_type->tp_name); - return 0; -} - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) { - PyObject *__import__ = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - __import__ = PyObject_GetAttrString(__pyx_b, "__import__"); - if (!__import__) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - module = PyObject_CallFunction(__import__, "OOOO", - name, global_dict, empty_dict, list); -bad: - Py_XDECREF(empty_list); - Py_XDECREF(__import__); - Py_XDECREF(empty_dict); - return module; -} - -static PyObject *__Pyx_GetStdout(void) { - PyObject *f = PySys_GetObject("stdout"); - if (!f) { - PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout"); - } - return f; -} - -static int __Pyx_PrintItem(PyObject *v) { - PyObject *f; - - if (!(f = __Pyx_GetStdout())) - return -1; - if (PyFile_SoftSpace(f, 1)) { - if (PyFile_WriteString(" ", f) < 0) - return -1; - } - if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0) - return -1; - if (PyString_Check(v)) { - char *s = PyString_AsString(v); - int len = PyString_Size(v); - if (len > 0 && - 
isspace(Py_CHARMASK(s[len-1])) && - s[len-1] != ' ') - PyFile_SoftSpace(f, 0); - } - return 0; -} - -static int __Pyx_PrintNewline(void) { - PyObject *f; - - if (!(f = __Pyx_GetStdout())) - return -1; - if (PyFile_WriteString("\n", f) < 0) - return -1; - PyFile_SoftSpace(f, 0); - return 0; -} - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) - PyErr_SetObject(PyExc_NameError, name); - return result; -} - -static int __Pyx_InternStrings(__Pyx_InternTabEntry *t) { - while (t->p) { - *t->p = PyString_InternFromString(t->s); - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, - long size) -{ - PyObject *py_module_name = 0; - PyObject *py_class_name = 0; - PyObject *py_name_list = 0; - PyObject *py_module = 0; - PyObject *result = 0; - - py_module_name = PyString_FromString(module_name); - if (!py_module_name) - goto bad; - py_class_name = PyString_FromString(class_name); - if (!py_class_name) - goto bad; - py_name_list = PyList_New(1); - if (!py_name_list) - goto bad; - Py_INCREF(py_class_name); - if (PyList_SetItem(py_name_list, 0, py_class_name) < 0) - goto bad; - py_module = __Pyx_Import(py_module_name, py_name_list); - if (!py_module) - goto bad; - result = PyObject_GetAttr(py_module, py_class_name); - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (((PyTypeObject *)result)->tp_basicsize != size) { - PyErr_Format(PyExc_ValueError, - "%s.%s does not appear to be the correct type object", - module_name, class_name); - goto bad; - } - goto done; -bad: - Py_XDECREF(result); - result = 0; -done: - Py_XDECREF(py_module_name); - Py_XDECREF(py_class_name); - Py_XDECREF(py_name_list); - return (PyTypeObject *)result; -} - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(char *funcname) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyObject *empty_tuple = 0; - PyObject *empty_string = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - py_srcfile = PyString_FromString(__pyx_filename); - if (!py_srcfile) goto bad; - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - empty_tuple = PyTuple_New(0); - if (!empty_tuple) goto bad; - empty_string = PyString_FromString(""); - if (!empty_string) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - empty_string, /*PyObject *code,*/ - empty_tuple, /*PyObject *consts,*/ - empty_tuple, /*PyObject *names,*/ - empty_tuple, /*PyObject *varnames,*/ - empty_tuple, /*PyObject *freevars,*/ - empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - empty_string /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_Get(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno 
= __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(empty_tuple); - Py_XDECREF(empty_string); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} diff --git a/numpy/doc/pyrex/numpyx.pyx b/numpy/doc/pyrex/numpyx.pyx deleted file mode 100644 index 068d251f6..000000000 --- a/numpy/doc/pyrex/numpyx.pyx +++ /dev/null @@ -1,101 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough -"""WARNING: this code is deprecated and slated for removal soon. See the -doc/cython directory for the replacement, which uses Cython (the actively -maintained version of Pyrex). -""" - -cimport c_python -cimport c_numpy -import numpy - -# Numpy must be initialized -c_numpy.import_array() - -def print_array_info(c_numpy.ndarray arr): - cdef int i - - print '-='*10 - print 'printing array info for ndarray at 0x%0lx'%(arr,) - print 'print number of dimensions:',arr.nd - print 'address of strides: 0x%0lx'%(arr.strides,) - print 'strides:' - for i from 0<=iarr.strides[i] - print 'memory dump:' - print_elements( arr.data, arr.strides, arr.dimensions, - arr.nd, sizeof(double), arr.dtype ) - print '-='*10 - print - -cdef print_elements(char *data, - c_python.Py_intptr_t* strides, - c_python.Py_intptr_t* dimensions, - int nd, - int elsize, - object dtype): - cdef c_python.Py_intptr_t i,j - cdef void* elptr - - if dtype not in [numpy.dtype(numpy.object_), - numpy.dtype(numpy.float64)]: - print ' print_elements() not (yet) implemented for dtype %s'%dtype.name - return - - if nd ==0: - if dtype==numpy.dtype(numpy.object_): - elptr = (data)[0] #[0] dereferences pointer in Pyrex - print ' ',elptr - elif dtype==numpy.dtype(numpy.float64): - print ' ',(data)[0] - elif nd == 1: - for i from 0<=idata)[0] - print ' ',elptr - elif dtype==numpy.dtype(numpy.float64): - print ' ',(data)[0] - data = data + strides[0] - else: - for i from 0<=ielsize and ->fields filled in appropriately. - - The itemsize attribute must return a number > 0. The fields - attribute must return a dictionary with at least "names" and - "formats" entries. The "formats" entry will be converted to a - "proper" descr->fields entry (all generic data-types converted to - ``PyArray_Descr *`` structure). - - -Reference counting for ``PyArray_Descr *`` objects. -``````````````````````````````````````````````````` - -Most functions that take ``PyArary_Descr *`` as arguments and return a -``PyObject *`` steal the reference unless otherwise noted in the code: - -Functions that return ``PyArray_Descr *`` objects return a new -reference. - -.. tip:: - - There is a new function and a new method of array objects both labelled - dtypescr which can be used to try out the ``PyArray_DescrConverter``. - diff --git a/numpy/doc/reference/basics.py b/numpy/doc/reference/basics.py deleted file mode 100644 index dfb8fe74d..000000000 --- a/numpy/doc/reference/basics.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -============ -Array basics -============ - -Array types and conversions between types -========================================= - -Numpy supports a much greater variety of numerical types than Python does. -This section shows which are available, and how to modify an array's data-type. 
- -========== ========================================================= -Data type Description -========== ========================================================= -bool Boolean (True or False) stored as a byte -int Platform integer (normally either ``int32`` or ``int64``) -int8 Byte (-128 to 127) -int16 Integer (-32768 to 32767) -int32 Integer (-2147483648 to 2147483647) -int64 Integer (-9223372036854775808 to 9223372036854775807) -uint8 Unsigned integer (0 to 255) -uint16 Unsigned integer (0 to 65535) -uint32 Unsigned integer (0 to 4294967295) -uint64 Unsigned integer (0 to 18446744073709551615) -float Shorthand for ``float64``. -float32 Single precision float: sign bit, 8 bits exponent, - 23 bits mantissa -float64 Double precision float: sign bit, 11 bits exponent, - 52 bits mantissa -complex Shorthand for ``complex128``. -complex64 Complex number, represented by two 32-bit floats (real - and imaginary components) -complex128 Complex number, represented by two 64-bit floats (real - and imaginary components) -========== ========================================================= - -Numpy numerical types are instances of ``dtype`` (data-type) objects, each -having unique characteristics. Once you have imported NumPy using - - :: - - >>> import numpy as np - -the dtypes are available as ``np.bool``, ``np.float32``, etc. - -Advanced types, not listed in the table above, are explored in -section `link_here`. - -There are 5 basic numerical types representing booleans (bool), integers (int), -unsigned integers (uint), floating point (float) and complex. Those with numbers -in their name indicate the bitsize of the type (i.e. how many bits are needed -to represent a single value in memory). Some types, such as ``int`` and -``intp``, have differing bitsizes, dependent on the platform (e.g. 32-bit -vs. 64-bit machines). This should be taken into account when interfacing -with low-level code (such as C or Fortran) where the raw memory is addressed. - -Data-types can be used as functions to convert python numbers to array scalars -(see the array scalar section for an explanation), python sequences of numbers -to arrays of that type, or as arguments to the dtype keyword that many numpy -functions or methods accept. Some examples:: - - >>> import numpy as np - >>> x = np.float32(1.0) - >>> x - 1.0 - >>> y = np.int_([1,2,4]) - >>> y - array([1, 2, 4]) - >>> z = np.arange(3, dtype=np.uint8) - >>> z - array([0, 1, 2], dtype=uint8) - -Array types can also be referred to by character codes, mostly to retain -backward compatibility with older packages such as Numeric. Some -documentation may still refer to these, for example:: - - >>> np.array([1, 2, 3], dtype='f') - array([ 1., 2., 3.], dtype=float32) - -We recommend using dtype objects instead. - -To convert the type of an array, use the .astype() method (preferred) or -the type itself as a function. For example: :: - - >>> z.astype(float) - array([0., 1., 2.]) - >>> np.int8(z) - array([0, 1, 2], dtype=int8) - -Note that, above, we use the *Python* float object as a dtype. NumPy knows -that ``int`` refers to ``np.int``, ``bool`` means ``np.bool`` and -that ``float`` is ``np.float``. The other data-types do not have Python -equivalents. - -To determine the type of an array, look at the dtype attribute:: - - >>> z.dtype - dtype('uint8') - -dtype objects also contain information about the type, such as its bit-width -and its byte-order. See xxx for details.
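For instance (a small illustration added here), the element size in bytes and the byte order can be read directly off a dtype object::

    >>> np.dtype(np.int16).itemsize   # two bytes per element
    2
    >>> np.dtype(np.int16).byteorder  # '=' means native byte order
    '='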
The data type can also be used -indirectly to query properties of the type, such as whether it is an integer:: - - >>> d = np.dtype(int) - >>> d - dtype('int32') - - >>> np.issubdtype(d, int) - True - - >>> np.issubdtype(d, float) - False - - -Array Scalars -============= - -Numpy generally returns elements of arrays as array scalars (a scalar -with an associated dtype). Array scalars differ from Python scalars, but -for the most part they can be used interchangeably (the primary -exception is for versions of Python older than v2.x, where integer array -scalars cannot act as indices for lists and tuples). There are some -exceptions, such as when code requires very specific attributes of a scalar -or when it checks specifically whether a value is a Python scalar. Generally, -problems are easily fixed by explicitly converting array scalars -to Python scalars, using the corresponding Python type function -(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). - -The primary advantage of using array scalars is that -they preserve the array type (Python may not have a matching scalar type -available, e.g. ``int16``). Therefore, the use of array scalars ensures -identical behaviour between arrays and scalars, irrespective of whether the -value is inside an array or not. NumPy scalars also have many of the same -methods arrays do. - -See xxx for details. - -""" diff --git a/numpy/doc/reference/broadcasting.py b/numpy/doc/reference/broadcasting.py deleted file mode 100644 index 95e9b67f9..000000000 --- a/numpy/doc/reference/broadcasting.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -======================== -Broadcasting over arrays -======================== - -The term broadcasting describes how numpy treats arrays with different -shapes during arithmetic operations. Subject to certain constraints, -the smaller array is "broadcast" across the larger array so that they -have compatible shapes. Broadcasting provides a means of vectorizing -array operations so that looping occurs in C instead of Python. It does -this without making needless copies of data and usually leads to -efficient algorithm implementations. There are, however, cases where -broadcasting is a bad idea because it leads to inefficient use of memory -that slows computation. - -NumPy operations are usually done element-by-element, which requires two -arrays to have exactly the same shape:: - - >>> a = np.array([1.0, 2.0, 3.0]) - >>> b = np.array([2.0, 2.0, 2.0]) - >>> a * b - array([ 2., 4., 6.]) - -NumPy's broadcasting rule relaxes this constraint when the arrays' -shapes meet certain constraints. The simplest broadcasting example occurs -when an array and a scalar value are combined in an operation: - ->>> a = np.array([1.0, 2.0, 3.0]) ->>> b = 2.0 ->>> a * b -array([ 2., 4., 6.]) - -The result is equivalent to the previous example where ``b`` was an array. -We can think of the scalar ``b`` being *stretched* during the arithmetic -operation into an array with the same shape as ``a``. The new elements in -``b`` are simply copies of the original scalar. The stretching analogy is -only conceptual. NumPy is smart enough to use the original scalar value -without actually making copies, so that broadcasting operations are as -memory and computationally efficient as possible. - -The second example is more effective than the first, since here broadcasting -moves less memory around during the multiplication (``b`` is a scalar, -not an array). 
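One way to see the shape such an operation will produce, without allocating any stretched copy of the data, is the ``np.broadcast`` object (a brief illustration added here)::

    >>> a = np.array([1.0, 2.0, 3.0])
    >>> b = 2.0
    >>> np.broadcast(a, b).shape   # shape of the broadcast result; no data is copied
    (3,)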
- -General Broadcasting Rules -========================== -When operating on two arrays, NumPy compares their shapes element-wise. -It starts with the trailing dimensions, and works its way forward. Two -dimensions are compatible when - -1) they are equal, or -2) one of them is 1 - -If these conditions are not met, a -``ValueError: frames are not aligned`` exception is thrown, indicating that -the arrays have incompatible shapes. The size of the resulting array -is the maximum size along each dimension of the input arrays. - -Arrays do not need to have the same *number* of dimensions. For example, -if you have a ``256x256x3`` array of RGB values, and you want to scale -each color in the image by a different value, you can multiply the image -by a one-dimensional array with 3 values. Lining up the sizes of the -trailing axes of these arrays according to the broadcast rules, shows that -they are compatible:: - - Image (3d array): 256 x 256 x 3 - Scale (1d array): 3 - Result (3d array): 256 x 256 x 3 - -When either of the dimensions compared is one, the larger of the two is -used. In other words, the smaller of two axes is stretched or "copied" -to match the other. - -In the following example, both the ``A`` and ``B`` arrays have axes with -length one that are expanded to a larger size during the broadcast -operation:: - - A (4d array): 8 x 1 x 6 x 1 - B (3d array): 7 x 1 x 5 - Result (4d array): 8 x 7 x 6 x 5 - -Here are some more examples:: - - A (2d array): 5 x 4 - B (1d array): 1 - Result (2d array): 5 x 4 - - A (2d array): 5 x 4 - B (1d array): 4 - Result (2d array): 5 x 4 - - A (3d array): 15 x 3 x 5 - B (3d array): 15 x 1 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 1 - Result (3d array): 15 x 3 x 5 - -Here are examples of shapes that do not broadcast:: - - A (1d array): 3 - B (1d array): 4 # trailing dimensions do not match - - A (2d array): 2 x 1 - B (3d array): 8 x 4 x 3 # second from last dimensions mismatch - -An example of broadcasting in practice:: - - >>> x = np.arange(4) - >>> xx = x.reshape(4,1) - >>> y = np.ones(5) - >>> z = np.ones((3,4)) - - >>> x.shape - (4,) - - >>> y.shape - (5,) - - >>> x + y - : shape mismatch: objects cannot be broadcast to a single shape - - >>> xx.shape - (4, 1) - - >>> y.shape - (5,) - - >>> (xx + y).shape - (4, 5) - - >>> xx + y - array([[ 1., 1., 1., 1., 1.], - [ 2., 2., 2., 2., 2.], - [ 3., 3., 3., 3., 3.], - [ 4., 4., 4., 4., 4.]]) - - >>> x.shape - (4,) - - >>> z.shape - (3, 4) - - >>> (x + z).shape - (3, 4) - - >>> x + z - array([[ 1., 2., 3., 4.], - [ 1., 2., 3., 4.], - [ 1., 2., 3., 4.]]) - -Broadcasting provides a convenient way of taking the outer product (or -any other outer operation) of two arrays. The following example shows an -outer addition operation of two 1-d arrays:: - - >>> a = np.array([0.0, 10.0, 20.0, 30.0]) - >>> b = np.array([1.0, 2.0, 3.0]) - >>> a[:, np.newaxis] + b - array([[ 1., 2., 3.], - [ 11., 12., 13.], - [ 21., 22., 23.], - [ 31., 32., 33.]]) - -Here the ``newaxis`` index operator inserts a new axis into ``a``, -making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array -with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. - -See `this article `_ -for illustrations of broadcasting concepts. 
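The two numbered rules above can also be spelled out as a tiny helper function (added here purely for illustration; it is not part of NumPy) that computes the broadcast shape of two shapes by hand::

    >>> def broadcast_shape(shape1, shape2):
    ...     # Pad the shorter shape with ones on the left, then apply the rules
    ...     n = max(len(shape1), len(shape2))
    ...     s1 = (1,) * (n - len(shape1)) + tuple(shape1)
    ...     s2 = (1,) * (n - len(shape2)) + tuple(shape2)
    ...     result = []
    ...     for d1, d2 in zip(s1, s2):
    ...         if d1 == d2 or d1 == 1 or d2 == 1:
    ...             result.append(max(d1, d2))
    ...         else:
    ...             raise ValueError("shapes %r and %r are not broadcastable"
    ...                              % (shape1, shape2))
    ...     return tuple(result)
    ...
    >>> broadcast_shape((8, 1, 6, 1), (7, 1, 5))
    (8, 7, 6, 5)
    >>> broadcast_shape((256, 256, 3), (3,))
    (256, 256, 3)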
- -""" diff --git a/numpy/doc/reference/creation.py b/numpy/doc/reference/creation.py deleted file mode 100644 index 1e80e5115..000000000 --- a/numpy/doc/reference/creation.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -============== -Array creation -============== - -Introduction -============ - -There are 5 general mechanisms for creating arrays: - -1) Conversion from other Python structures (e.g., lists, tuples) -2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, etc.) -3) Reading arrays from disk, either from standard or custom formats -4) Creating arrays from raw bytes through the use of strings or buffers -5) Use of special library functions (e.g., random) - -This section will not cover means of replicating, joining, or otherwise -expanding or mutating existing arrays. Nor will it cover creating object -arrays or record arrays. Both of those are covered in their own sections. - -Converting Python array-like objects to numpy arrays -==================================================== - -In general, numerical data arranged in an array-like structure in Python can -be converted to arrays through the use of the array() function. The most obvious -examples are lists and tuples. See the documentation for array() for details for -its use. Some -objects may support the array-protocol and allow conversion to arrays this -way. A simple way to find out if the object can be converted to a numpy array -using array() is simply to try it interactively and see if it works! (The -Python Way). - -Examples: :: - - >>> x = np.array([2,3,1,0]) - >>> x = np.array([2, 3, 1, 0]) - >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types - >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) - -Intrinsic numpy array creation -============================== - -Numpy has built-in functions for creating arrays from scratch: - -zeros(shape) will create an array filled with 0 values with the specified -shape. The default dtype is float64. - -``>>> np.zeros((2, 3)) -array([[ 0., 0., 0.], [ 0., 0., 0.]])`` - -ones(shape) will create an array filled with 1 values. It is identical to -zeros in all other respects. - -arange() will create arrays with regularly incrementing values. Check the -docstring for complete information on the various ways it can be used. A few -examples will be given here: :: - - >>> np.arange(10) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=np.float) - array([ 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> np.arange(2, 3, 0.1) - array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) - -Note that there are some subtleties regarding the last usage that the user -should be aware of that are described in the arange docstring. - -indices() will create a set of arrays (stacked as a one-higher dimensioned -array), one per dimension with each representing variation in that dimension. -An examples illustrates much better than a verbal description: :: - - >>> np.indices((3,3)) - array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) - -This is particularly useful for evaluating functions of multiple dimensions on -a regular grid. - -Reading arrays from disk -======================== - -This is presumably the most common case of large array creation. The details, -of course, depend greatly on the format of data on disk and so this section -can only give general pointers on how to handle various formats. 
- -Standard binary formats ------------------------ - -Various fields have standard formats for array data. The following lists the -ones with known python libraries to read them and return numpy arrays (there -may be others for which it is possible to read and convert to numpy arrays so -check the last section as well) - -HDF5: PyTables -FITS: PyFITS -Others? xxx - -Examples of formats that cannot be read directly but for which it is not hard -to convert are libraries like PIL (able to read and write many image formats -such as jpg, png, etc). - -Common ascii formats --------------------- - -Comma Separated Value files (CSV) are widely used (and an export and import -option for programs like Excel). There are a number of ways of reading these -files in Python. The most convenient ways of reading these are found in pylab -(part of matplotlib) in the xxx function. (list alternatives xxx) - -More generic ascii files can be read using the io package in scipy. xxx a few -more details needed... - -Custom binary formats ---------------------- - -There are a variety of approaches one can use. If the file has a relatively -simple format then one can write a simple I/O library and use the numpy -fromfile() function and .tofile() method to read and write numpy arrays -directly (mind your byteorder though!) If a good C or C++ library exists that -reads the data, one can wrap that library with a variety of techniques (see -xxx) though that certainly is much more work and requires significantly more -advanced knowledge to interface with C or C++. - -Use of special libraries ------------------------- - -There are libraries that can be used to generate arrays for special purposes -and it isn't possible to enumerate all of them. The most common uses are of -the many array generation functions in random that can generate arrays of -random values, and some utility functions to generate special matrices (e.g. -diagonal, see xxx) - -""" diff --git a/numpy/doc/reference/glossary.py b/numpy/doc/reference/glossary.py deleted file mode 100644 index 6a182adf4..000000000 --- a/numpy/doc/reference/glossary.py +++ /dev/null @@ -1,367 +0,0 @@ -""" -================= -Glossary -================= - -along an axis - Axes are defined for arrays with more than one dimension. A - 2-dimensional array has two corresponding axes: the first running - vertically downwards across rows (axis 0), and the second running - horizontally across columns (axis 1). - - Many operations can take place along one of these axes. For example, - we can sum each row of an array, in which case we operate along - columns, or axis 1:: - - >>> x = np.arange(12).reshape((3,4)) - - >>> x - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - >>> x.sum(axis=1) - array([ 6, 22, 38]) - -array or ndarray - A homogeneous container of numerical elements. Each element in the - array occupies a fixed amount of memory (hence homogeneous), and - can be a numerical element of a single type (such as float, int - or complex) or a combination (such as ``(float, int, float)``).
Each - array has an associated data-type (or ``dtype``), which describes - the numerical type of its elements:: - - >>> x = np.array([1, 2, 3], float) - - >>> x - array([ 1., 2., 3.]) - - >>> x.dtype # floating point number, 64 bits of memory per element - dtype('float64') - - - # More complicated data type: each array element is a combination of - # an integer and a floating point number - >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) - array([(1, 2.0), (3, 4.0)], - dtype=[('x', '>> x = np.array([1, 2, 3]) - >>> x.shape - (3,) - -broadcast - NumPy can do operations on arrays whose shapes are mismatched:: - - >>> x = np.array([1, 2]) - >>> y = np.array([[3], [4]]) - - >>> x - array([1, 2]) - - >>> y - array([[3], - [4]]) - - >>> x + y - array([[4, 5], - [5, 6]]) - - See `doc.broadcasting`_ for more information. - -decorator - An operator that transforms a function. For example, a ``log`` - decorator may be defined to print debugging information upon - function execution:: - - >>> def log(f): - ... def new_logging_func(*args, **kwargs): - ... print "Logging call with parameters:", args, kwargs - ... return f(*args, **kwargs) - ... - ... return new_logging_func - - Now, when we define a function, we can "decorate" it using ``log``:: - - >>> @log - ... def add(a, b): - ... return a + b - - Calling ``add`` then yields: - - >>> add(1, 2) - Logging call with parameters: (1, 2) {} - 3 - -dictionary - Resembling a language dictionary, which provides a mapping between - words and descriptions thereof, a Python dictionary is a mapping - between two objects:: - - >>> x = {1: 'one', 'two': [1, 2]} - - Here, `x` is a dictionary mapping keys to values, in this case - the integer 1 to the string "one", and the string "two" to - the list ``[1, 2]``. The values may be accessed using their - corresponding keys:: - - >>> x[1] - 'one' - - >>> x['two'] - [1, 2] - - Note that dictionaries are not stored in any specific order. Also, - most mutable (see *immutable* below) objects, such as lists, may not - be used as keys. - - For more information on dictionaries, read the - `Python tutorial `_. - -immutable - An object that cannot be modified after construction is called - immutable. Two common examples are strings and tuples. - -instance - A class definition gives the blueprint for constructing an object:: - - >>> class House(object): - ... wall_colour = 'white' - - Yet, we have to *build* a house before it exists:: - - >>> h = House() # build a house - - Now, ``h`` is called a ``House`` instance. An instance is therefore - a specific realisation of a class. - -iterable - A sequence that allows "walking" (iterating) over items, typically - using a loop such as:: - - >>> x = [1, 2, 3] - >>> [item**2 for item in x] - [1, 4, 9] - - It is often used in combination with ``enumerate``:: - - >>> for n, k in enumerate(keys): - ... print "Key %d: %s" % (n, k) - ... - Key 0: a - Key 1: b - Key 2: c - -list - A Python container that can hold any number of objects or items.
- The items do not have to be of the same type, and can even be - lists themselves:: - - >>> x = [2, 2.0, "two", [2, 2.0]] - - The list `x` contains 4 items, each of which can be accessed individually:: - - >>> x[2] # the string 'two' - 'two' - - >>> x[3] # a list, containing an integer 2 and a float 2.0 - [2, 2.0] - - It is also possible to select more than one item at a time, - using *slicing*:: - - >>> x[0:2] # or, equivalently, x[:2] - [2, 2.0] - - In code, arrays are often conveniently expressed as nested lists:: - - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - For more information, read the section on lists in the `Python - tutorial `_. For a mapping - type (key-value), see *dictionary*. - -mask - A boolean array, used to select only certain elements for an operation:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> mask = (x > 2) - >>> mask - array([False, False, False, True, True], dtype=bool) - - >>> x[mask] = -1 - >>> x - array([ 0, 1, 2, -1, -1]) - -masked array - Array that suppresses values indicated by a mask:: - - >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) - >>> x - masked_array(data = [-- 2.0 --], - mask = [ True False True], - fill_value=1e+20) - - >>> x + [1, 2, 3] - masked_array(data = [-- 4.0 --], - mask = [ True False True], - fill_value=1e+20) - - Masked arrays are often used when operating on arrays containing - missing or invalid entries. - -matrix - A 2-dimensional ndarray that preserves its two-dimensional nature - throughout operations. It has certain special operations, such as ``*`` - (matrix multiplication) and ``**`` (matrix power), defined:: - - >>> x = np.mat([[1, 2], [3, 4]]) - - >>> x - matrix([[1, 2], - [3, 4]]) - - >>> x**2 - matrix([[ 7, 10], - [15, 22]]) - -method - A function associated with an object. For example, each ndarray has a - method called ``repeat``:: - - >>> x = np.array([1, 2, 3]) - - >>> x.repeat(2) - array([1, 1, 2, 2, 3, 3]) - -reference - If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, - ``a`` and ``b`` are different names for the same Python object. - -self - Often seen in method signatures, ``self`` refers to the instance - of the associated class. For example: - - >>> class Paintbrush(object): - ... color = 'blue' - ... - ... def paint(self): - ... print "Painting the city %s!" % self.color - ... - >>> p = Paintbrush() - >>> p.color = 'red' - >>> p.paint() # self refers to 'p' - Painting the city red! - -slice - Used to select only certain elements from a sequence:: - - >>> x = range(5) - >>> x - [0, 1, 2, 3, 4] - - >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) - [1, 2] - - >>> x[1:5:2] # slice from 1 to 5, but skipping every second element - [1, 3] - - >>> x[::-1] # slice a sequence in reverse - [4, 3, 2, 1, 0] - - Arrays may have more than one dimension, each of which can be sliced - individually:: - - >>> x = np.array([[1, 2], [3, 4]]) - >>> x - array([[1, 2], - [3, 4]]) - - >>> x[:, 1] - array([2, 4]) - -tuple - A sequence that may contain a variable number of types of any - kind. A tuple is immutable, i.e., once constructed it cannot be - changed.
Similar to a list, it can be indexed and sliced:: - - >>> x = (1, 'one', [1, 2]) - - >>> x - (1, 'one', [1, 2]) - - >>> x[0] - 1 - - >>> x[:2] - (1, 'one') - - A useful concept is "tuple unpacking", which allows variables to - be assigned to the contents of a tuple:: - - >>> x, y = (1, 2) - >>> x, y = 1, 2 - - This is often used when a function returns multiple values: - - >>> def return_many(): - ... return 1, 'alpha', None - - >>> a, b, c = return_many() - >>> a, b, c - (1, 'alpha', None) - - >>> a - 1 - >>> b - 'alpha' - -ufunc - Universal function. A fast element-wise array operation. Examples include - ``add``, ``sin`` and ``logical_or``. - -view - An array that does not own its data, but refers to another array's - data instead. For example, we may create a view that only shows - every second element of another array:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> y = x[::2] - >>> y - array([0, 2, 4]) - - >>> x[0] = 3 # changing x changes y as well, since y is a view on x - >>> y - array([3, 2, 4]) - -wrapper - Python is a high-level (highly abstracted, or English-like) language. - This abstraction comes at a price in execution speed, and sometimes - it becomes necessary to use lower level languages to do fast - computations. A wrapper is code that provides a bridge between - the high- and low-level languages, allowing, e.g., Python to execute - code written in C or Fortran. - - Examples include ctypes, SWIG and Cython (which wraps C and C++) - and f2py (which wraps Fortran). - -""" diff --git a/numpy/doc/reference/howtofind.py b/numpy/doc/reference/howtofind.py deleted file mode 100644 index 29ad05318..000000000 --- a/numpy/doc/reference/howtofind.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -================= -How to Find Stuff -================= - -How to find things in NumPy. - -""" diff --git a/numpy/doc/reference/indexing.py b/numpy/doc/reference/indexing.py deleted file mode 100644 index 365edd67a..000000000 --- a/numpy/doc/reference/indexing.py +++ /dev/null @@ -1,384 +0,0 @@ -""" -============== -Array indexing -============== - -Array indexing refers to any use of the square brackets ([]) to index -array values. There are many options to indexing, which give numpy -indexing great power, but with power comes some complexity and the -potential for confusion. This section is just an overview of the -various options and issues related to indexing. Aside from single -element indexing, the details on most of these options are to be -found in related sections. - -Assignment vs referencing -========================= - -Most of the following examples show the use of indexing when referencing -data in an array. The examples work just as well when assigning to an -array. See the section at the end for specific examples and explanations -on how assignments work. - -Single element indexing -======================= - -Single element indexing for a 1-D array is what one expects. It works -exactly like that for other standard Python sequences. It is 0-based, -and accepts negative indices for indexing from the end of the array. :: - - >>> x = np.arange(10) - >>> x[2] - 2 - >>> x[-2] - 8 - -Unlike lists and tuples, numpy arrays support multidimensional indexing -for multidimensional arrays. That means that it is not necessary to -separate each dimension's index into its own set of square brackets.
:: - - >>> x.shape = (2,5) # now x is 2-dimensional - >>> x[1,3] - 8 - >>> x[1,-1] - 9 - -Note that if one indexes a multidimensional array with fewer indices -than dimensions, one gets a subdimensional array. For example: :: - - >>> x[0] - array([0, 1, 2, 3, 4]) - -That is, each index specified selects the array corresponding to the rest -of the dimensions selected. In the above example, choosing 0 means that -remaining dimension of lenth 5 is being left unspecified, and that what -is returned is an array of that dimensionality and size. It must be noted -that the returned array is not a copy of the original, but points to the -same values in memory as does the original array (a new view of the same -data in other words, see xxx for details). In this case, -the 1-D array at the first position (0) is returned. So using a single -index on the returned array, results in a single element being returned. -That is: :: - - >>> x[0][2] - 2 - -So note that ``x[0,2] = x[0][2]`` though the second case is more inefficient -a new temporary array is created after the first index that is subsequently -indexed by 2. - -Note to those used to IDL or Fortran memory order as it relates to indexing. -Numpy uses C-order indexing. That means that the last index usually (see -xxx for exceptions) represents the most rapidly changing memory location, -unlike Fortran or IDL, where the first index represents the most rapidly -changing location in memory. This difference represents a great potential -for confusion. - -Other indexing options -====================== - -It is possible to slice and stride arrays to extract arrays of the same -number of dimensions, but of different sizes than the original. The slicing -and striding works exactly the same way it does for lists and tuples except -that they can be applied to multiple dimensions as well. A few -examples illustrates best: :: - - >>> x = np.arange(10) - >>> x[2:5] - array([2, 3, 4]) - >>> x[:-7] - array([0, 1, 2]) - >>> x[1:7:2] - array([1,3,5]) - >>> y = np.arange(35).reshape(5,7) - >>> y[1:5:2,::3] - array([[ 7, 10, 13], - [21, 24, 27]]) - -Note that slices of arrays do not copy the internal array data but -also produce new views of the original data (see xxx for more -explanation of this issue). - -It is possible to index arrays with other arrays for the purposes of -selecting lists of values out of arrays into new arrays. There are two -different ways of accomplishing this. One uses one or more arrays of -index values (see xxx for details). The other involves giving a boolean -array of the proper shape to indicate the values to be selected. -Index arrays are a very powerful tool that allow one to avoid looping -over individual elements in arrays and thus greatly improve performance -(see xxx for examples) - -It is possible to use special features to effectively increase the -number of dimensions in an array through indexing so the resulting -array aquires the shape needed for use in an expression or with a -specific function. See xxx. - -Index arrays -============ - -Numpy arrays may be indexed with other arrays (or any other sequence-like -object that can be converted to an array, such as lists, with the exception -of tuples; see the end of this document for why this is). The use of index -arrays ranges from simple, straightforward cases to complex, hard-to-understand -cases. For all cases of index arrays, what is returned is a copy of the -original data, not a view as one gets for slices. - -Index arrays must be of integer type. 
Each value in the array indicates which -value in the array to use in place of the index. To illustrate: :: - - >>> x = np.arange(10,1,-1) - >>> x - array([10, 9, 8, 7, 6, 5, 4, 3, 2]) - >>> x[np.array([3, 3, 1, 8])] - array([7, 7, 9, 2]) - - -The index array consisting of the values 3, 3, 1 and 8 correspondingly create -an array of length 4 (same as the index array) where each index is replaced by -the value the index array has in the array being indexed. - -Negative values are permitted and work as they do with single indices or slices: :: - - >>> x[np.array([3,3,-3,8])] - array([7, 7, 4, 2]) - -It is an error to have index values out of bounds: :: - - >>> x[np.array([3, 3, 20, 8])] - : index 20 out of bounds 0<=index<9 - -Generally speaking, what is returned when index arrays are used is an array with -the same shape as the index array, but with the type and values of the array being -indexed. As an example, we can use a multidimensional index array instead: :: - - >>> x[np.array([[1,1],[2,3]])] - array([[9, 9], - [8, 7]]) - -Indexing Multi-dimensional arrays -================================= - -Things become more complex when multidimensional arrays are indexed, particularly -with multidimensional index arrays. These tend to be more unusal uses, but they -are permitted, and they are useful for some problems. We'll start with the -simplest multidimensional case (using the array y from the previous examples): :: - - >>> y[np.array([0,2,4]), np.array([0,1,2])] - array([ 0, 15, 30]) - -In this case, if the index arrays have a matching shape, and there is an index -array for each dimension of the array being indexed, the resultant array has the -same shape as the index arrays, and the values correspond to the index set for each -position in the index arrays. In this example, the first index value is 0 for both -index arrays, and thus the first value of the resultant array is y[0,0]. The next -value is y[2,1], and the last is y[4,2]. - -If the index arrays do not have the same shape, there is an attempt to broadcast -them to the same shape. Broadcasting won't be discussed here but is discussed in -detail in xxx. If they cannot be broadcast to the same shape, an exception is -raised: :: - - >>> y[np.array([0,2,4]), np.array([0,1])] - : shape mismatch: objects cannot be broadcast to a single shape - -The broadcasting mechanism permits index arrays to be combined with scalars for -other indices. The effect is that the scalar value is used for all the corresponding -values of the index arrays: :: - - >>> y[np.array([0,2,4]), 1] - array([ 1, 15, 29]) - -Jumping to the next level of complexity, it is possible to only partially index an array -with index arrays. It takes a bit of thought to understand what happens in such cases. -For example if we just use one index array with y: :: - - >>> y[np.array([0,2,4])] - array([[ 0, 1, 2, 3, 4, 5, 6], - [14, 15, 16, 17, 18, 19, 20], - [28, 29, 30, 31, 32, 33, 34]]) - -What results is the construction of a new array where each value of the index array -selects one row from the array being indexed and the resultant array has the resulting -shape (size of row, number index elements). - -An example of where this may be useful is for a color lookup table where we want to map -the values of an image into RGB triples for display. The lookup table could have a shape -(nlookup, 3). 
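A small sketch of that palette case (the array contents and sizes below are invented purely for illustration): ::

    >>> palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]])  # shape (nlookup, 3): 3 colors
    >>> image = np.array([[0, 1], [2, 0]], dtype=np.uint8)         # shape (ny, nx) = (2, 2)
    >>> palette[image].shape                                       # one RGB triple per pixel
    (2, 2, 3)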
Indexing such an array with an image with shape (ny, nx) with dtype=np.uint8 -(or any integer type so long as values are with the bounds of the lookup table) will -result in an array of shape (ny, nx, 3) where a triple of RGB values is associated with -each pixel location. - -In general, the shape of the resulant array will be the concatenation of the shape of -the index array (or the shape that all the index arrays were broadcast to) with the -shape of any unused dimensions (those not indexed) in the array being indexed. - -Boolean or "mask" index arrays -============================== - -Boolean arrays used as indices are treated in a different manner entirely than index -arrays. Boolean arrays must be of the same shape as the array being indexed, or -broadcastable to the same shape. In the most straightforward case, the boolean array -has the same shape: :: - - >>> b = y>20 - >>> y[b] - array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) - -The result is a 1-D array containing all the elements in the indexed array corresponding -to all the true elements in the boolean array. As with index arrays, what is returned -is a copy of the data, not a view as one gets with slices. - -With broadcasting, multidimesional arrays may be the result. For example: :: - - >>> b[:,5] # use a 1-D boolean that broadcasts with y - array([False, False, False, True, True], dtype=bool) - >>> y[b[:,5]] - array([[21, 22, 23, 24, 25, 26, 27], - [28, 29, 30, 31, 32, 33, 34]]) - -Here the 4th and 5th rows are selected from the indexed array and combined to make a -2-D array. - -Combining index arrays with slices -================================== - -Index arrays may be combined with slices. For example: :: - - >>> y[np.array([0,2,4]),1:3] - array([[ 1, 2], - [15, 16], - [29, 30]]) - -In effect, the slice is converted to an index array np.array([[1,2]]) (shape (1,2)) that is -broadcast with the index array to produce a resultant array of shape (3,2). - -Likewise, slicing can be combined with broadcasted boolean indices: :: - - >>> y[b[:,5],1:3] - array([[22, 23], - [29, 30]]) - -Structural indexing tools -========================= - -To facilitate easy matching of array shapes with expressions and in -assignments, the np.newaxis object can be used within array indices -to add new dimensions with a size of 1. For example: :: - - >>> y.shape - (5, 7) - >>> y[:,np.newaxis,:].shape - (5, 1, 7) - -Note that there are no new elements in the array, just that the -dimensionality is increased. This can be handy to combine two -arrays in a way that otherwise would require explicitly reshaping -operations. For example: :: - - >>> x = np.arange(5) - >>> x[:,np.newaxis] + x[np.newaxis,:] - array([[0, 1, 2, 3, 4], - [1, 2, 3, 4, 5], - [2, 3, 4, 5, 6], - [3, 4, 5, 6, 7], - [4, 5, 6, 7, 8]]) - -The ellipsis syntax maybe used to indicate selecting in full any -remaining unspecified dimensions. For example: :: - - >>> z = np.arange(81).reshape(3,3,3,3) - >>> z[1,...,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -This is equivalent to: :: - - >>> z[1,:,:,2] - -Assigning values to indexed arrays -================================== - -As mentioned, one can select a subset of an array to assign to using -a single index, slices, and index and mask arrays. The value being -assigned to the indexed array must be shape consistent (the same shape -or broadcastable to the shape the index produces). 
For example, it is -permitted to assign a constant to a slice: :: - - >>> x[2:7] = 1 - -or an array of the right size: :: - - >>> x[2:7] = np.arange(5) - -Note that assignments may result in changes if assigning -higher types to lower types (like floats to ints) or even -exceptions (assigning complex to floats or ints): :: - - >>> x[1] = 1.2 - >>> x[1] - 1 - >>> x[1] = 1.2j - : can't convert complex to long; use long(abs(z)) - - -Unlike some of the references (such as array and mask indices) -assignments are always made to the original data in the array -(indeed, nothing else would make sense!). Note though, that some -actions may not work as one may naively expect. This particular -example is often surprising to people: :: - - >>> x[np.array([1, 1, 3, 1]) += 1 - -Where people expect that the 1st location will be incremented by 3. -In fact, it will only be incremented by 1. The reason is because -a new array is extracted from the original (as a temporary) containing -the values at 1, 1, 3, 1, then the value 1 is added to the temporary, -and then the temporary is assigned back to the original array. Thus -the value of the array at x[1]+1 is assigned to x[1] three times, -rather than being incremented 3 times. - -Dealing with variable numbers of indices within programs -======================================================== - -The index syntax is very powerful but limiting when dealing with -a variable number of indices. For example, if you want to write -a function that can handle arguments with various numbers of -dimensions without having to write special case code for each -number of possible dimensions, how can that be done? If one -supplies to the index a tuple, the tuple will be interpreted -as a list of indices. For example (using the previous definition -for the array z): :: - - >>> indices = (1,1,1,1) - >>> z[indices] - 40 - -So one can use code to construct tuples of any number of indices -and then use these within an index. - -Slices can be specified within programs by using the slice() function -in Python. For example: :: - - >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2] - array([39, 40]) - -Likewise, ellipsis can be specified by code by using the Ellipsis object: :: - - >>> indices = (1, Ellipsis, 1) # same as [1,...,1] - >>> z[indices] - array([[28, 31, 34], - [37, 40, 43], - [46, 49, 52]]) - -For this reason it is possible to use the output from the np.where() -function directly as an index since it always returns a tuple of index arrays. - -Because the special treatment of tuples, they are not automatically converted -to an array as a list would be. As an example: :: - - >>> z[[1,1,1,1]] - ... # produces a large array - >>> z[(1,1,1,1)] - 40 # returns a single value - -""" diff --git a/numpy/doc/reference/internals.py b/numpy/doc/reference/internals.py deleted file mode 100644 index a74429368..000000000 --- a/numpy/doc/reference/internals.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -=============== -Array Internals -=============== - -Internal organization of numpy arrays -===================================== - -It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to Numpy". - -Numpy arrays consist of two major components, the raw array data (from now on, -referred to as the data buffer), and the information about the raw array data. 
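As a quick sketch of that split (the exact stride values shown depend on the dtype and memory layout chosen here), the describing information is visible through array attributes, while views share the underlying buffer: ::

    >>> x = np.arange(6, dtype=np.int16).reshape(2, 3)
    >>> x.dtype, x.shape, x.strides      # metadata describing the data buffer
    (dtype('int16'), (2, 3), (6, 2))
    >>> y = x.T                          # new shape/stride metadata, same buffer
    >>> y.strides
    (2, 6)
    >>> y[0, 0] = 42                     # writing through the view is visible in x
    >>> x[0, 0]
    42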
-The data buffer is typically what people think of as arrays in C or Fortran, -a contiguous (and fixed) block of memory containing fixed sized data items. -Numpy also contains a significant set of data that describes how to interpret -the data in the data buffer. This extra information contains (among other things): - - 1) The basic data element's size in bytes - 2) The start of the data within the data buffer (an offset relative to the - beginning of the data buffer). - 3) The number of dimensions and the size of each dimension - 4) The separation between elements for each dimension (the 'stride'). This - does not have to be a multiple of the element size - 5) The byte order of the data (which may not be the native byte order) - 6) Whether the buffer is read-only - 7) Information (via the dtype object) about the interpretation of the basic - data element. The basic data element may be as simple as a int or a float, - or it may be a compound object (e.g., struct-like), a fixed character field, - or Python object pointers. - 8) Whether the array is to interpreted as C-order or Fortran-order. - -This arrangement allow for very flexible use of arrays. One thing that it allows -is simple changes of the metadata to change the interpretation of the array buffer. -Changing the byteorder of the array is a simple change involving no rearrangement -of the data. The shape of the array can be changed very easily without changing -anything in the data buffer or any data copying at all - -Among other things that are made possible is one can create a new array metadata -object that uses the same data buffer -to create a new view of that data buffer that has a different interpretation -of the buffer (e.g., different shape, offset, byte order, strides, etc) but -shares the same data bytes. Many operations in numpy do just this such as -slices. Other operations, such as transpose, don't move data elements -around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the doesn't move. - -Typically these new versions of the array metadata but the same data buffer are -new 'views' into the data buffer. There is a different ndarray object, but it -uses the same data buffer. This is why it is necessary to force copies through -use of the .copy() method if one really wants to make a new and independent -copy of the data buffer. - -New views into arrays mean the the object reference counts for the data buffer -increase. Simply doing away with the original array object will not remove the -data buffer if other views of it still exist. - -Multidimensional Array Indexing Order Issues -============================================ - -What is the right way to index -multi-dimensional arrays? Before you jump to conclusions about the one and -true way to index multi-dimensional arrays, it pays to understand why this is -a confusing issue. This section will try to explain in detail how numpy -indexing works and why we adopt the convention we do for images, and when it -may be appropriate to adopt other conventions. - -The first thing to understand is -that there are two conflicting conventions for indexing 2-dimensional arrays. -Matrix notation uses the first index to indicate which row is being selected and -the second index to indicate which column is selected. 
This is opposite the -geometrically oriented-convention for images where people generally think the -first index represents x position (i.e., column) and the second represents y -position (i.e., row). This alone is the source of much confusion; -matrix-oriented users and image-oriented users expect two different things with -regard to indexing. - -The second issue to understand is how indices correspond -to the order the array is stored in memory. In Fortran the first index is the -most rapidly varying index when moving through the elements of a two -dimensional array as it is stored in memory. If you adopt the matrix -convention for indexing, then this means the matrix is stored one column at a -time (since the first index moves to the next row as it changes). Thus Fortran -is considered a Column-major language. C has just the opposite convention. In -C, the last index changes most rapidly as one moves through the array as -stored in memory. Thus C is a Row-major language. The matrix is stored by -rows. Note that in both cases it presumes that the matrix convention for -indexing is being used, i.e., for both Fortran and C, the first index is the -row. Note this convention implies that the indexing convention is invariant -and that the data order changes to keep that so. - -But that's not the only way -to look at it. Suppose one has large two-dimensional arrays (images or -matrices) stored in data files. Suppose the data are stored by rows rather than -by columns. If we are to preserve our index convention (whether matrix or -image) that means that depending on the language we use, we may be forced to -reorder the data if it is read into memory to preserve our indexing -convention. For example if we read row-ordered data into memory without -reordering, it will match the matrix indexing convention for C, but not for -Fortran. Conversely, it will match the image indexing convention for Fortran, -but not for C. For C, if one is using data stored in row order, and one wants -to preserve the image index convention, the data must be reordered when -reading into memory. - -In the end, which you do for Fortran or C depends on -which is more important, not reordering data or preserving the indexing -convention. For large images, reordering data is potentially expensive, and -often the indexing convention is inverted to avoid that. - -The situation with -numpy makes this issue yet more complicated. The internal machinery of numpy -arrays is flexible enough to accept any ordering of indices. One can simply -reorder indices by manipulating the internal stride information for arrays -without reordering the data at all. Numpy will know how to map the new index -order to the data without moving the data. - -So if this is true, why not choose -the index order that matches what you most expect? In particular, why not define -row-ordered images to use the image convention? (This is sometimes referred -to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' -order options for array ordering in numpy.) The drawback of doing this is -potential performance penalties. It's common to access the data sequentially, -either implicitly in array operations or explicitly by looping over rows of an -image. When that is done, then the data will be accessed in non-optimal order. -As the first index is incremented, what is actually happening is that elements -spaced far apart in memory are being sequentially accessed, with usually poor -memory access speeds. 
For example, for a two dimensional image 'im' defined so -that im[0, 10] represents the value at x=0, y=10. To be consistent with usual -Python behavior then im[0] would represent a column at x=0. Yet that data -would be spread over the whole array since the data are stored in row order. -Despite the flexibility of numpy's indexing, it can't really paper over the fact -basic operations are rendered inefficient because of data order or that getting -contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs -im[0]), thus one can't use an idiom such as for row in im; for col in im does -work, but doesn't yield contiguous column data. - -As it turns out, numpy is -smart enough when dealing with ufuncs to determine which index is the most -rapidly varying one in memory and uses that for the innermost loop. Thus for -ufuncs there is no large intrinsic advantage to either approach in most cases. -On the other hand, use of .flat with an FORTRAN ordered array will lead to -non-optimal memory access as adjacent elements in the flattened array (iterator, -actually) are not contiguous in memory. - -Indeed, the fact is that Python -indexing on lists and other sequences naturally leads to an outside-to inside -ordering (the first index gets the largest grouping, the next the next largest, -and the last gets the smallest element). Since image data are normally stored -by rows, this corresponds to position within rows being the last item indexed. - -If you do want to use Fortran ordering realize that -there are two approaches to consider: 1) accept that the first index is just not -the most rapidly changing in memory and have all your I/O routines reorder -your data when going from memory to disk or visa versa, or use numpy's -mechanism for mapping the first index to the most rapidly varying data. We -recommend the former if possible. The disadvantage of the latter is that many -of numpy's functions will yield arrays without Fortran ordering unless you are -careful to use the 'order' keyword. Doing this would be highly inconvenient. - -Otherwise we recommend simply learning to reverse the usual order of indices -when accessing elements of an array. Granted, it goes against the grain, but -it is more in line with Python semantics and the natural order of the data. - -""" diff --git a/numpy/doc/reference/io.py b/numpy/doc/reference/io.py deleted file mode 100644 index 3cde40bd0..000000000 --- a/numpy/doc/reference/io.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -========= -Array I/O -========= - -Placeholder for array I/O documentation. - -""" diff --git a/numpy/doc/reference/jargon.py b/numpy/doc/reference/jargon.py deleted file mode 100644 index e13ff5686..000000000 --- a/numpy/doc/reference/jargon.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -====== -Jargon -====== - -Placeholder for computer science, engineering and other jargon. - -""" diff --git a/numpy/doc/reference/methods_vs_functions.py b/numpy/doc/reference/methods_vs_functions.py deleted file mode 100644 index 22eadccf7..000000000 --- a/numpy/doc/reference/methods_vs_functions.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -===================== -Methods vs. Functions -===================== - -Placeholder for Methods vs. Functions documentation. - -""" diff --git a/numpy/doc/reference/misc.py b/numpy/doc/reference/misc.py deleted file mode 100644 index e978100bf..000000000 --- a/numpy/doc/reference/misc.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -============= -Miscellaneous -============= - -Placeholder for other tips. 
- -""" diff --git a/numpy/doc/reference/performance.py b/numpy/doc/reference/performance.py deleted file mode 100644 index 1429e232f..000000000 --- a/numpy/doc/reference/performance.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -=========== -Performance -=========== - -Placeholder for Improving Performance documentation. - -""" diff --git a/numpy/doc/reference/structured_arrays.py b/numpy/doc/reference/structured_arrays.py deleted file mode 100644 index 7bbd0deda..000000000 --- a/numpy/doc/reference/structured_arrays.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -===================================== -Structured Arrays (aka Record Arrays) -===================================== - -Introduction -============ - -Numpy provides powerful capabilities to create arrays of structs or records. -These arrays permit one to manipulate the data by the structs or by fields of -the struct. A simple example will show what is meant.: :: - - >>> x = np.zeros((2,),dtype=('i4,f4,a10')) - >>> x[:] = [(1,2.,'Hello'),(2,3.,"World")] - >>> x - array([(1, 2.0, 'Hello'), (2, 3.0, 'World')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - -Here we have created a one-dimensional array of length 2. Each element of -this array is a record that contains three items, a 32-bit integer, a 32-bit -float, and a string of length 10 or less. If we index this array at the second -position we get the second record: :: - - >>> x[1] - (2,3.,"World") - -The interesting aspect is that we can reference the different fields of the -array simply by indexing the array with the string representing the name of -the field. In this case the fields have received the default names of 'f0', 'f1' -and 'f2'. - - >>> y = x['f1'] - >>> y - array([ 2., 3.], dtype=float32) - >>> y[:] = 2*y - >>> y - array([ 4., 6.], dtype=float32) - >>> x - array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - -In these examples, y is a simple float array consisting of the 2nd field -in the record. But it is not a copy of the data in the structured array, -instead it is a view. It shares exactly the same data. Thus when we updated -this array by doubling its values, the structured array shows the -corresponding values as doubled as well. Likewise, if one changes the record, -the field view changes: :: - - >>> x[1] = (-1,-1.,"Master") - >>> x - array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - >>> y - array([ 4., -1.], dtype=float32) - -Defining Structured Arrays -========================== - -The definition of a structured array is all done through the dtype object. -There are a **lot** of different ways one can define the fields of a -record. Some of variants are there to provide backward compatibility with -Numeric or numarray, or another module, and should not be used except for -such purposes. These will be so noted. One defines records by specifying -the structure by 4 general ways, using an argument (as supplied to a dtype -function keyword or a dtype object constructor itself) in the form of a: -1) string, 2) tuple, 3) list, or 4) dictionary. Each of these will be briefly -described. - -1) String argument (as used in the above examples). -In this case, the constructor is expecting a comma -separated list of type specifiers, optionally with extra shape information. 
-The type specifiers can take 4 different forms: :: - - a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f4, f8, c8, c16, a - (representing bytes, ints, unsigned ints, floats, complex and - fixed length strings of specified byte lengths) - b) int8,...,uint8,...,float32, float64, complex64, complex128 - (this time with bit sizes) - c) older Numeric/numarray type specifications (e.g. Float32). - Don't use these in new code! - d) Single character type specifiers (e.g H for unsigned short ints). - Avoid using these unless you must. Details can be found in the - Numpy book - -These different styles can be mixed within the same string (but why would you -want to do that?). Furthermore, each type specifier can be prefixed -with a repetition number, or a shape. In these cases an array -element is created, i.e., an array within a record. That array -is still referred to as a single field. An example: :: - - >>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64') - >>> x - array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])], - dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))]) - -By using strings to define the record structure, it precludes being -able to name the fields in the original definition. The names can -be changed as shown later, however. - -2) Tuple argument: The only relevant tuple case that applies to record -structures is when a structure is mapped to an existing data type. This -is done by pairing in a tuple, the existing data type with a matching -dtype definition (using any of the variants being described here). As -an example (using a definition using a list, so see 3) for further -details): :: - - >>> x = zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')])) - >>> x - array([0, 0, 0]) - >>> x['r'] - array([0, 0, 0], dtype=uint8) - -In this case, an array is produced that looks and acts like a simple int32 array, -but also has definitions for fields that use only one byte of the int32 (a bit -like Fortran equivalencing). - -3) List argument: In this case the record structure is defined with a list of -tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field -('' is permitted), 2) the type of the field, and 3) the shape (optional). -For example: - - >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) - >>> x - array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])], - dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))]) - -4) Dictionary argument: two different forms are permitted. The first consists -of a dictionary with two required keys ('names' and 'formats'), each having an -equal sized list of values. The format list contains any type/shape specifier -allowed in other contexts. The names must be strings. There are two optional -keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to -the required two where offsets contain integer offsets for each field, and -titles are objects containing metadata for each field (these do not have -to be strings), where the value of None is permitted. 
As an example: :: - - >>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']}) - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[('col1', '>i4'), ('col2', '>f4')]) - -The other dictionary form permitted is a dictionary of name keys with tuple -values specifying type, offset, and an optional title. - - >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')}) - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')]) - -Accessing and modifying field names -=================================== - -The field names are an attribute of the dtype object defining the record structure. -For the last example: :: - - >>> x.dtype.names - ('col1', 'col2') - >>> x.dtype.names = ('x', 'y') - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')]) - >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names - : must replace all names at once with a sequence of length 2 - -Accessing field titles -==================================== - -The field titles provide a standard place to put associated info for fields. -They do not have to be strings. - - >>> x.dtype.fields['x'][2] - 'title 1' - -""" diff --git a/numpy/doc/reference/ufuncs.py b/numpy/doc/reference/ufuncs.py deleted file mode 100644 index 4819e5268..000000000 --- a/numpy/doc/reference/ufuncs.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -=================== -Universal Functions -=================== - -Ufuncs are, generally speaking, mathematical functions or operations that are -applied element-by-element to the contents of an array. That is, the result -in each output array element only depends on the value in the corresponding -input array (or arrays) and on no other array elements. Numpy comes with a -large suite of ufuncs, and scipy extends that suite substantially. The simplest -example is the addition operator: :: - - >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) - array([1, 3, 2, 6]) - -The unfunc module lists all the available ufuncs in numpy. Additional ufuncts -available in xxx in scipy. Documentation on the specific ufuncs may be found -in those modules. This documentation is intended to address the more general -aspects of unfuncs common to most of them. All of the ufuncs that make use of -Python operators (e.g., +, -, etc.) have equivalent functions defined -(e.g. add() for +) - -Type coercion -============= - -What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of -two different types? What is the type of the result? Typically, the result is -the higher of the two types. For example: :: - - float32 + float64 -> float64 - int8 + int32 -> int32 - int16 + float32 -> float32 - float32 + complex64 -> complex64 - -There are some less obvious cases generally involving mixes of types -(e.g. uints, ints and floats) where equal bit sizes for each are not -capable of saving all the information in a different type of equivalent -bit size. Some examples are int32 vs float32 or uint32 vs int32. -Generally, the result is the higher type of larger size than both -(if available). So: :: - - int32 + float32 -> float64 - uint32 + int32 -> int64 - -Finally, the type coercion behavior when expressions involve Python -scalars is different than that seen for arrays. Since Python has a -limited number of types, combining a Python int with a dtype=np.int8 -array does not coerce to the higher type but instead, the type of the -array prevails. 
So the rules for Python scalars combined with arrays is -that the result will be that of the array equivalent the Python scalar -if the Python scalar is of a higher 'kind' than the array (e.g., float -vs. int), otherwise the resultant type will be that of the array. -For example: :: - - Python int + int8 -> int8 - Python float + int8 -> float64 - -ufunc methods -============= - -Binary ufuncs support 4 methods. These methods are explained in detail in xxx -(or are they, I don't see anything in the ufunc docstring that is useful?). - -**.reduce(arr)** applies the binary operator to elements of the array in sequence. For example: :: - - >>> np.add.reduce(np.arange(10)) # adds all elements of array - 45 - -For multidimensional arrays, the first dimension is reduced by default: :: - - >>> np.add.reduce(np.arange(10).reshape(2,5)) - array([ 5, 7, 9, 11, 13]) - -The axis keyword can be used to specify different axes to reduce: :: - - >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) - array([10, 35]) - -**.accumulate(arr)** applies the binary operator and generates an an equivalently -shaped array that includes the accumulated amount for each element of the -array. A couple examples: :: - - >>> np.add.accumulate(np.arange(10)) - array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) - >>> np.multiply.accumulate(np.arange(1,9)) - array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) - -The behavior for multidimensional arrays is the same as for .reduce(), as is the use of the axis keyword). - -**.reduceat(arr,indices)** allows one to apply reduce to selected parts of an array. -It is a difficult method to understand. See the documentation at: - -**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and arr2. It will work on multidimensional arrays (the shape of the result is the -concatenation of the two input shapes.: :: - - >>> np.multiply.outer(np.arange(3),np.arange(4)) - array([[0, 0, 0, 0], - [0, 1, 2, 3], - [0, 2, 4, 6]]) - -Output arguments -================ - -All ufuncs accept an optional output array. The array must be of the expected output shape. Beware that if the type of the output array is of a -different (and lower) type than the output result, the results may be silently -truncated or otherwise corrupted in the downcast to the lower type. This usage -is useful when one wants to avoid creating large temporary arrays and instead -allows one to reuse the same array memory repeatedly (at the expense of not -being able to use more convenient operator notation in expressions). Note that -when the output argument is used, the ufunc still returns a reference to the -result. - - >>> x = np.arange(2) - >>> np.add(np.arange(2),np.arange(2.),x) - array([0, 2]) - >>> x - array([0, 2]) - -and & or as ufuncs -================== - -Invariably people try to use the python 'and' and 'or' as logical operators -(and quite understandably). But these operators do not behave as normal -operators since Python treats these quite differently. They cannot be -overloaded with array equivalents. Thus using 'and' or 'or' with an array -results in an error. There are two alternatives: - - 1) use the ufunc functions logical_and() and logical_or(). - 2) use the bitwise operators & and \\|. The drawback of these is that if - the arguments to these operators are not boolean arrays, the result is - likely incorrect. On the other hand, most usages of logical_and and - logical_or are with boolean arrays. As long as one is careful, this is - a convenient way to apply these operators. 
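A small sketch of the difference (values chosen only for illustration): ::

    >>> a = np.array([True, True, False])
    >>> b = np.array([True, False, False])
    >>> np.logical_and(a, b)
    array([ True, False, False], dtype=bool)
    >>> a & b                          # same answer here, because both arrays are boolean
    array([ True, False, False], dtype=bool)
    >>> np.logical_and(2, 1), 2 & 1    # with plain integers the two can disagree
    (True, 0)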
- -""" diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py new file mode 100644 index 000000000..7bbd0deda --- /dev/null +++ b/numpy/doc/structured_arrays.py @@ -0,0 +1,176 @@ +""" +===================================== +Structured Arrays (aka Record Arrays) +===================================== + +Introduction +============ + +Numpy provides powerful capabilities to create arrays of structs or records. +These arrays permit one to manipulate the data by the structs or by fields of +the struct. A simple example will show what is meant.: :: + + >>> x = np.zeros((2,),dtype=('i4,f4,a10')) + >>> x[:] = [(1,2.,'Hello'),(2,3.,"World")] + >>> x + array([(1, 2.0, 'Hello'), (2, 3.0, 'World')], + dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) + +Here we have created a one-dimensional array of length 2. Each element of +this array is a record that contains three items, a 32-bit integer, a 32-bit +float, and a string of length 10 or less. If we index this array at the second +position we get the second record: :: + + >>> x[1] + (2,3.,"World") + +The interesting aspect is that we can reference the different fields of the +array simply by indexing the array with the string representing the name of +the field. In this case the fields have received the default names of 'f0', 'f1' +and 'f2'. + + >>> y = x['f1'] + >>> y + array([ 2., 3.], dtype=float32) + >>> y[:] = 2*y + >>> y + array([ 4., 6.], dtype=float32) + >>> x + array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) + +In these examples, y is a simple float array consisting of the 2nd field +in the record. But it is not a copy of the data in the structured array, +instead it is a view. It shares exactly the same data. Thus when we updated +this array by doubling its values, the structured array shows the +corresponding values as doubled as well. Likewise, if one changes the record, +the field view changes: :: + + >>> x[1] = (-1,-1.,"Master") + >>> x + array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')], + dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) + >>> y + array([ 4., -1.], dtype=float32) + +Defining Structured Arrays +========================== + +The definition of a structured array is all done through the dtype object. +There are a **lot** of different ways one can define the fields of a +record. Some of variants are there to provide backward compatibility with +Numeric or numarray, or another module, and should not be used except for +such purposes. These will be so noted. One defines records by specifying +the structure by 4 general ways, using an argument (as supplied to a dtype +function keyword or a dtype object constructor itself) in the form of a: +1) string, 2) tuple, 3) list, or 4) dictionary. Each of these will be briefly +described. + +1) String argument (as used in the above examples). +In this case, the constructor is expecting a comma +separated list of type specifiers, optionally with extra shape information. +The type specifiers can take 4 different forms: :: + + a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f4, f8, c8, c16, a + (representing bytes, ints, unsigned ints, floats, complex and + fixed length strings of specified byte lengths) + b) int8,...,uint8,...,float32, float64, complex64, complex128 + (this time with bit sizes) + c) older Numeric/numarray type specifications (e.g. Float32). + Don't use these in new code! + d) Single character type specifiers (e.g H for unsigned short ints). + Avoid using these unless you must. 
Details can be found in the + Numpy book + +These different styles can be mixed within the same string (but why would you +want to do that?). Furthermore, each type specifier can be prefixed +with a repetition number, or a shape. In these cases an array +element is created, i.e., an array within a record. That array +is still referred to as a single field. An example: :: + + >>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64') + >>> x + array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), + ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), + ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])], + dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))]) + +By using strings to define the record structure, it precludes being +able to name the fields in the original definition. The names can +be changed as shown later, however. + +2) Tuple argument: The only relevant tuple case that applies to record +structures is when a structure is mapped to an existing data type. This +is done by pairing in a tuple, the existing data type with a matching +dtype definition (using any of the variants being described here). As +an example (using a definition using a list, so see 3) for further +details): :: + + >>> x = zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')])) + >>> x + array([0, 0, 0]) + >>> x['r'] + array([0, 0, 0], dtype=uint8) + +In this case, an array is produced that looks and acts like a simple int32 array, +but also has definitions for fields that use only one byte of the int32 (a bit +like Fortran equivalencing). + +3) List argument: In this case the record structure is defined with a list of +tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field +('' is permitted), 2) the type of the field, and 3) the shape (optional). +For example: + + >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) + >>> x + array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), + (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), + (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])], + dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))]) + +4) Dictionary argument: two different forms are permitted. The first consists +of a dictionary with two required keys ('names' and 'formats'), each having an +equal sized list of values. The format list contains any type/shape specifier +allowed in other contexts. The names must be strings. There are two optional +keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to +the required two where offsets contain integer offsets for each field, and +titles are objects containing metadata for each field (these do not have +to be strings), where the value of None is permitted. As an example: :: + + >>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']}) + >>> x + array([(0, 0.0), (0, 0.0), (0, 0.0)], + dtype=[('col1', '>i4'), ('col2', '>f4')]) + +The other dictionary form permitted is a dictionary of name keys with tuple +values specifying type, offset, and an optional title. + + >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')}) + array([(0, 0.0), (0, 0.0), (0, 0.0)], + dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')]) + +Accessing and modifying field names +=================================== + +The field names are an attribute of the dtype object defining the record structure. 
+For the last example: :: + + >>> x.dtype.names + ('col1', 'col2') + >>> x.dtype.names = ('x', 'y') + >>> x + array([(0, 0.0), (0, 0.0), (0, 0.0)], + dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')]) + >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names + : must replace all names at once with a sequence of length 2 + +Accessing field titles +==================================== + +The field titles provide a standard place to put associated info for fields. +They do not have to be strings. + + >>> x.dtype.fields['x'][2] + 'title 1' + +""" diff --git a/numpy/doc/swig/Makefile b/numpy/doc/swig/Makefile deleted file mode 100644 index b64492f45..000000000 --- a/numpy/doc/swig/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# List all of the subdirectories here for recursive make -SUBDIRS = test doc - -# Default target -.PHONY : default -default: - @echo "There is no default make target for this Makefile" - @echo "Valid make targets are:" - @echo " test - Compile and run tests of numpy.i" - @echo " doc - Generate numpy.i documentation" - @echo " all - make test + doc" - @echo " clean - Remove generated files recursively" - -# Target all -.PHONY : all -all: $(SUBDIRS) - -# Target test -.PHONY : test -test: - cd $@ && make $@ - -# Target doc -.PHONY : doc -doc: - cd $@ && make - -# Target clean -.PHONY : clean -clean: - @for dir in $(SUBDIRS); do \ - echo ; \ - echo Running \'make clean\' in $$dir; \ - cd $$dir && make clean && cd ..; \ - done; \ - echo diff --git a/numpy/doc/swig/README b/numpy/doc/swig/README deleted file mode 100644 index d557b305f..000000000 --- a/numpy/doc/swig/README +++ /dev/null @@ -1,130 +0,0 @@ -Notes for the numpy/doc/swig directory -====================================== - -This set of files is for developing and testing file numpy.i, which is -intended to be a set of typemaps for helping SWIG interface between C -and C++ code that uses C arrays and the python module NumPy. It is -ultimately hoped that numpy.i will be included as part of the SWIG -distribution. - -Documentation -------------- -Documentation for how to use numpy.i is in the doc directory. The -primary source file here is numpy_swig.txt, a restructured text file -that documents how to use numpy.i. The Makefile in doc allows for the -conversion of numpy_swig.txt to HTML (if you have docutils installed) -and to PDF (if you have docutils and latex/pdftex installed). This -should not be necessary, however, as numpy_swig.html and -numpy_swig.pdf are stored in the repository. - -The same is true for a file called doc/testing.txt, which describes -the testing system used here. - -If you have the prerequisites installed and wish to build the HTML and -PDF documentation, this can be achieved by calling:: - - $ make doc - -from the shell. - -Testing -------- -The tests are a good example of what we are trying to do with numpy.i. -The files related to testing are are in the test subdirectory:: - - Vector.h - Vector.cxx - Vector.i - testVector.py - - Matrix.h - Matrix.cxx - Matrix.i - testMatrix.py - - Tensor.h - Tensor.cxx - Tensor.i - testTensor.py - -The header files contain prototypes for functions that illustrate the -wrapping issues we wish to address. Right now, this consists of -functions with argument signatures of the following forms. 
Vector.h:: - - (type IN_ARRAY1[ANY]) - (type* IN_ARRAY1, int DIM1) - (int DIM1, type* IN_ARRAY1) - - (type INPLACE_ARRAY1[ANY]) - (type* INPLACE_ARRAY1, int DIM1) - (int DIM1, type* INPLACE_ARRAY1) - - (type ARGOUT_ARRAY1[ANY]) - (type* ARGOUT_ARRAY1, int DIM1) - (int DIM1, type* ARGOUT_ARRAY1) - -Matrix.h:: - - (type IN_ARRAY2[ANY][ANY]) - (type* IN_ARRAY2, int DIM1, int DIM2) - (int DIM1, int DIM2, type* IN_ARRAY2) - - (type INPLACE_ARRAY2[ANY][ANY]) - (type* INPLACE_ARRAY2, int DIM1, int DIM2) - (int DIM1, int DIM2, type* INPLACE_ARRAY2) - - (type ARGOUT_ARRAY2[ANY][ANY]) - -Tensor.h:: - - (type IN_ARRAY3[ANY][ANY][ANY]) - (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) - (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) - - (type INPLACE_ARRAY3[ANY][ANY][ANY]) - (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) - (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) - - (type ARGOUT_ARRAY3[ANY][ANY][ANY]) - -These function signatures take a pointer to an array of type "type", -whose length is specified by the integer(s) DIM1 (and DIM2, and DIM3). - -The objective for the IN_ARRAY signatures is for SWIG to generate -python wrappers that take a container that constitutes a valid -argument to the numpy array constructor, and can be used to build an -array of type "type". Currently, types "signed char", "unsigned -char", "short", "unsigned short", "int", "unsigned int", "long", -"unsigned long", "long long", "unsigned long long", "float", and -"double" are supported and tested. - -The objective for the INPLACE_ARRAY signatures is for SWIG to generate -python wrappers that accept a numpy array of any of the above-listed -types. - -The source files Vector.cxx, Matrix.cxx and Tensor.cxx contain the -actual implementations of the functions described in Vector.h, -Matrix.h and Tensor.h. The python scripts testVector.py, -testMatrix.py and testTensor.py test the resulting python wrappers -using the unittest module. - -The SWIG interface files Vector.i, Matrix.i and Tensor.i are used to -generate the wrapper code. The SWIG_FILE_WITH_INIT macro allows -numpy.i to be used with multiple python modules. If it is specified, -then the %init block found in Vector.i, Matrix.i and Tensor.i are -required. The other things done in Vector.i, Matrix.i and Tensor.i -are the inclusion of the appropriate header file and numpy.i file, and -the "%apply" directives to force the functions to use the typemaps. - -The setup.py script is a standard python distutils script. It defines -_Vector, _Matrix and _Tensor extension modules and Vector, Matrix and -Tensor python modules. The Makefile automates everything, setting up -the dependencies, calling swig to generate the wrappers, and calling -setup.py to compile the wrapper code and generate the shared objects. -Targets "all" (default), "test", "doc" and "clean" are supported. The -"doc" target creates HTML documentation (with make target "html"), and -PDF documentation (with make targets "tex" and "pdf"). 
- -To build and run the test code, simply execute from the shell:: - - $ make test diff --git a/numpy/doc/swig/doc/Makefile b/numpy/doc/swig/doc/Makefile deleted file mode 100644 index 9223f0481..000000000 --- a/numpy/doc/swig/doc/Makefile +++ /dev/null @@ -1,51 +0,0 @@ -# ReStructured Text -RST2HTML = rst2html.py -RST2LATEX = rst2latex.py -RFLAGS = --generator --time -HTML_FLAGS = --no-xml-declaration -LATEX_FLAGS = -LATEX = pdflatex - -# Web pages that need to be made -WEB_PAGES = numpy_swig.html testing.html - -# LaTeX files that need to be made -LATEX_FILES = numpy_swig.tex testing.tex - -# PDF files that need to be made -PDF_FILES = numpy_swig.pdf testing.pdf - -# Default target: documentation -.PHONY : doc -doc: html pdf - -# HTML target -.PHONY : html -html: $(WEB_PAGES) - -# Rule: %.txt -> %.html -%.html: %.txt - $(RST2HTML) $(RFLAGS) $(HTML_FLAGS) $< $@ - -# LaTeX target -.PHONY : tex -tex: $(LATEX_FILES) - -# Rule: %.txt -> %.tex -%.tex: %.txt - $(RST2LATEX) $(RFLAGS) $(LATEX_FLAGS) $< $@ - -# PDF target -.PHONY : pdf -pdf: $(PDF_FILES) - -# Rule: %.tex -> %.pdf -%.pdf: %.tex - $(LATEX) $< - $(LATEX) $< - -# Clean target -.PHONY : clean -clean: - $(RM) $(LATEX_FILES) - $(RM) *.pyc *.aux *.dvi *.log *.out *~ diff --git a/numpy/doc/swig/doc/numpy_swig.html b/numpy/doc/swig/doc/numpy_swig.html deleted file mode 100644 index ed127f330..000000000 --- a/numpy/doc/swig/doc/numpy_swig.html +++ /dev/null @@ -1,1244 +0,0 @@ - - - - - -numpy.i: a SWIG Interface File for NumPy - - - - - -
-

numpy.i: a SWIG Interface File for NumPy

- --- - - - - - - - -
Author: Bill Spotz
Institution: Sandia National Laboratories
Date: 1 December, 2007
- -
-

Introduction

-

The Simple Wrapper and Interface Generator (or SWIG) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -SWIG can parse header files, and using only the code prototypes, -create an interface to the target language. But SWIG is not -omnipotent. For example, it cannot know from the prototype:

-
-double rms(double* seq, int n);
-
-

what exactly seq is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? SWIG cannot determine these details, -and does not attempt to do so.

-

If we designed rms, we probably made it a routine that takes an -input-only array of length n of double values called seq -and returns the root mean square. The default behavior of SWIG, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended.
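In NumPy terms, the quantity rms computes is simply the following (a sketch of the math only, not of the C implementation; the printed value is approximate):

    >>> import numpy as np
    >>> seq = np.array([1.0, 2.0, 3.0, 4.0])
    >>> np.sqrt(np.mean(seq**2))   # root mean square of the sequence
    2.7386127875258306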

-

For python, the preferred way of handling -contiguous (or technically, strided) blocks of homogeneous data is -with the module NumPy, which provides full -object-oriented access to multidimensional arrays of data. Therefore, -the most logical python interface for the rms function would be -(including doc string):

-
-def rms(seq):
-    """
-    rms: return the root mean square of a sequence
-    rms(numpy.ndarray) -> double
-    rms(list) -> double
-    rms(tuple) -> double
-    """
-
-

where seq would be a NumPy array of double values, and its -length n would be extracted from seq internally before being -passed to the C routine. Even better, since NumPy supports -construction of arrays from arbitrary python sequences, seq -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a double) and the wrapper code would -internally convert it to a NumPy array before extracting its data -and length.
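Assuming the wrappers have been compiled into a module named rms_module (a name invented here purely for this sketch), the resulting interface would behave along these lines:

    >>> import numpy as np
    >>> import rms_module                       # hypothetical compiled extension module
    >>> rms_module.rms(np.array([1.0, 2.0]))    # a NumPy array of doubles
    1.5811388300841898
    >>> rms_module.rms([1.0, 2.0])              # any sequence convertible to doubles also works
    1.5811388300841898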

-

SWIG allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use numpy.i, a SWIG interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the rms function prototype defined above was in a header file -named rms.h. To obtain the python interface discussed above, -your SWIG interface file would need the following:

-
-%{
-#define SWIG_FILE_WITH_INIT
-#include "rms.h"
-%}
-
-%include "numpy.i"
-
-%init %{
-import_array();
-%}
-
-%apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)};
-%include "rms.h"
-
-

Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. We will refer to such lists as -signatures. One of the many typemaps defined by numpy.i is used -above and has the signature (double* IN_ARRAY1, int DIM1). The -argument names are intended to suggest that the double* argument -is an input array of one dimension and that the int represents -that dimension. This is precisely the pattern in the rms -prototype.

-

Most likely, no actual prototypes to be wrapped will have the argument -names IN_ARRAY1 and DIM1. We use the %apply directive to -apply the typemap for one-dimensional input arrays of type double -to the actual prototype used by rms. Using numpy.i -effectively, therefore, requires knowing what typemaps are available -and what they do.

-

A SWIG interface file that includes the SWIG directives given -above will produce wrapper code that looks something like:

-
- 1 PyObject *_wrap_rms(PyObject *args) {
- 2   PyObject *resultobj = 0;
- 3   double *arg1 = (double *) 0 ;
- 4   int arg2 ;
- 5   double result;
- 6   PyArrayObject *array1 = NULL ;
- 7   int is_new_object1 = 0 ;
- 8   PyObject * obj0 = 0 ;
- 9
-10   if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail;
-11   {
-12     array1 = obj_to_array_contiguous_allow_conversion(
-13                  obj0, NPY_DOUBLE, &is_new_object1);
-14     npy_intp size[1] = {
-15       -1
-16     };
-17     if (!array1 || !require_dimensions(array1, 1) ||
-18         !require_size(array1, size, 1)) SWIG_fail;
-19     arg1 = (double*) array1->data;
-20     arg2 = (int) array1->dimensions[0];
-21   }
-22   result = (double)rms(arg1,arg2);
-23   resultobj = SWIG_From_double((double)(result));
-24   {
-25     if (is_new_object1 && array1) Py_DECREF(array1);
-26   }
-27   return resultobj;
-28 fail:
-29   {
-30     if (is_new_object1 && array1) Py_DECREF(array1);
-31   }
-32   return NULL;
-33 }
-
-

The typemaps from numpy.i are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the rms -function. From the format string "O:rms", we can see that the -argument list is expected to be a single python object (specified -by the O before the colon) and whose pointer is stored in -obj0. A number of functions, supplied by numpy.i, are called -to make and check the (possible) conversion from a generic python -object to a NumPy array. These functions are explained in the -section Helper Functions, but hopefully their names are -self-explanatory. At line 12 we use obj0 to construct a NumPy -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed.

-

This code has a significant amount of error handling. Note that -SWIG_fail is a macro for goto fail, referring to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the NumPy array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30.

-

Note that if the C function signature was in a different order:

-
-double rms(int n, double* seq);
-
-

then SWIG would not match the typemap signature given above with -the argument list for rms. Fortunately, numpy.i has a set of -typemaps with the data pointer given last:

-
-%apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)};
-
-

This simply has the effect of switching the definitions of arg1 -and arg2 in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20.

-
-
-

Using numpy.i

-

The numpy.i file is currently located in the numpy/docs/swig -sub-directory under the numpy installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. If it is ever adopted by SWIG developers, then it -will be installed in a standard place where SWIG can find it.

-

A simple module that only uses a single SWIG interface file should -include the following:

-
-%{
-#define SWIG_FILE_WITH_INIT
-%}
-%include "numpy.i"
-%init %{
-import_array();
-%}
-
-

Within a compiled python module, import_array() should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should #define SWIG_FILE_WITH_INIT or call -import_array(). Or, this initialization call could be in a -wrapper file generated by SWIG from an interface file that has the -%init block as above. If this is the case, and you have more than -one SWIG interface file, then only one interface file should -#define SWIG_FILE_WITH_INIT and call import_array().

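For the case of more than one interface file described above, a minimal sketch of the layout might look like the following; the file names with_init.i and without_init.i are placeholders for this example, not files provided with numpy.i:

    /* with_init.i -- the single interface file that initializes NumPy */
    %{
    #define SWIG_FILE_WITH_INIT
    %}
    %include "numpy.i"
    %init %{
    import_array();
    %}

    /* without_init.i -- any additional interface file; it still includes
       numpy.i for the typemaps, but does not define SWIG_FILE_WITH_INIT
       and does not call import_array() again */
    %include "numpy.i"
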

Available Typemaps

The typemap directives provided by numpy.i for arrays of different data types, say double and int, and dimensions of different types, say int or long, are identical to one another except for the C and NumPy type specifications. The typemaps are therefore implemented (typically behind the scenes) via a macro:

    %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE)

that can be invoked for appropriate (DATA_TYPE, DATA_TYPECODE, DIM_TYPE) triplets. For example:

    %numpy_typemaps(double, NPY_DOUBLE, int)
    %numpy_typemaps(int,    NPY_INT   , int)

The numpy.i interface file uses the %numpy_typemaps macro to implement typemaps for the following C data types and int dimension types:

  • signed char
  • unsigned char
  • short
  • unsigned short
  • int
  • unsigned int
  • long
  • unsigned long
  • long long
  • unsigned long long
  • float
  • double

In the following descriptions, we reference a generic DATA_TYPE, which could be any of the C data types listed above, and DIM_TYPE, which should be one of the many types of integers.

The typemap signatures are largely differentiated on the name given to the buffer pointer. Names with FARRAY are for FORTRAN-ordered arrays, and names with ARRAY are for C-ordered (or 1D) arrays.


Input Arrays

Input arrays are defined as arrays of data that are passed into a routine but are not altered in-place or returned to the user. The python input array is therefore allowed to be almost any python sequence (such as a list) that can be converted to the requested type of array. The input array signatures are

1D:

  • ( DATA_TYPE IN_ARRAY1[ANY] )
  • ( DATA_TYPE* IN_ARRAY1, int DIM1 )
  • ( int DIM1, DATA_TYPE* IN_ARRAY1 )

2D:

  • ( DATA_TYPE IN_ARRAY2[ANY][ANY] )
  • ( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )
  • ( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )

3D:

  • ( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )
  • ( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )
  • ( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )

The first signature listed, ( DATA_TYPE IN_ARRAY1[ANY] ), is for one-dimensional arrays with hard-coded dimensions. Likewise, ( DATA_TYPE IN_ARRAY2[ANY][ANY] ) is for two-dimensional arrays with hard-coded dimensions, and similarly for three-dimensional.

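To illustrate how these signatures are used, a two-dimensional input typemap could be applied to a hypothetical prototype; the function trace() and the header trace.h are made up for this example:

    /* Hypothetical prototype: double trace(double* mat, int rows, int cols); */
    %apply (double* IN_ARRAY2, int DIM1, int DIM2) {(double* mat, int rows, int cols)};
    %include "trace.h"

From python, the wrapped trace() could then be passed any nested sequence or two-dimensional array that is convertible to an array of double.
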

In-Place Arrays

In-place arrays are defined as arrays that are modified in-place. The input values may or may not be used, but the values at the time the function returns are significant. The provided python argument must therefore be a NumPy array of the required type. The in-place signatures are

1D:

  • ( DATA_TYPE INPLACE_ARRAY1[ANY] )
  • ( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )
  • ( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )

2D:

  • ( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )
  • ( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )
  • ( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )

3D:

  • ( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )
  • ( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )
  • ( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )

These typemaps now check to make sure that the INPLACE_ARRAY arguments use native byte ordering. If not, an exception is raised.

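A sketch of how an in-place typemap is typically applied; the function scale() and the header scale.h are hypothetical:

    /* Hypothetical prototype: void scale(double* vec, int n, double factor); */
    %apply (double* INPLACE_ARRAY1, int DIM1) {(double* vec, int n)};
    %include "scale.h"

The python caller would then pass a contiguous, native-byte-order NumPy array of double, which is modified in place.
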

Argout Arrays

Argout arrays are arrays that appear in the input arguments in C, but are in fact output arrays. This pattern occurs often when there is more than one output variable and the single return argument is therefore not sufficient. In python, the conventional way to return multiple arguments is to pack them into a sequence (tuple, list, etc.) and return the sequence. This is what the argout typemaps do. If a wrapped function that uses these argout typemaps has more than one return argument, they are packed into a tuple or list, depending on the version of python. The python user does not pass these arrays in; they simply get returned. For the case where a dimension is specified, the python user must provide that dimension as an argument. The argout signatures are

1D:

  • ( DATA_TYPE ARGOUT_ARRAY1[ANY] )
  • ( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )
  • ( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )

2D:

  • ( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )

3D:

  • ( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )

These are typically used in situations where in C/C++ you would allocate an array (or arrays) on the heap and call the function to fill in their values. In python, the arrays are allocated for you and returned as new array objects.

Note that we support DATA_TYPE* argout typemaps in 1D, but not 2D or 3D. This is because of a quirk with the SWIG typemap syntax and cannot be avoided. Note that for these types of 1D typemaps, the python function will take a single argument representing DIM1.

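For instance, a 1D argout typemap could be applied to a hypothetical prototype; the function fill_ramp() is made up for this example:

    /* Hypothetical prototype: void fill_ramp(double* out, int n); */
    %apply (double* ARGOUT_ARRAY1, int DIM1) {(double* out, int n)};

From python, the call would then look like ramp = module.fill_ramp(10): the user supplies only the dimension, and a new 10-element array is allocated, passed to the C function to be filled, and returned.
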

Argoutview Arrays

Argoutview arrays are for when your C code provides you with a view of its internal data and does not require any memory to be allocated by the user. This can be dangerous. There is almost no way to guarantee that the internal data from the C code will remain in existence for the entire lifetime of the NumPy array that encapsulates it. If the user destroys the object that provides the view of the data before destroying the NumPy array, then using that array may result in bad memory references or segmentation faults. Nevertheless, there are situations, working with large data sets, where you simply have no other choice.

The C code to be wrapped for argoutview arrays is characterized by pointers: pointers to the dimensions and double pointers to the data, so that these values can be passed back to the user. The argoutview typemap signatures are therefore

1D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )
  • ( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )

2D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )

3D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3 )
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3 )

Note that arrays with hard-coded dimensions are not supported. These cannot follow the double pointer signatures of these typemaps.

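As an illustration, a library routine with the hypothetical prototype void get_view(double** data, int* n), which reports a pointer to an internal buffer and its length, could be matched to the first 1D signature (for the default int dimension type):

    /* Hypothetical prototype: void get_view(double** data, int* n); */
    %apply (double** ARGOUTVIEW_ARRAY1, int* DIM1) {(double** data, int* n)};

The wrapper then returns a NumPy array whose data pointer refers to the library's internal buffer, subject to the lifetime caveats discussed above.
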

Output Arrays

The numpy.i interface file does not support typemaps for output arrays, for several reasons. First, C/C++ return arguments are limited to a single value. This prevents obtaining dimension information in a general way. Second, arrays with hard-coded lengths are not permitted as return arguments. In other words:

    double[3] newVector(double x, double y, double z);

is not legal C/C++ syntax. Therefore, we cannot provide typemaps of the form:

    %typemap(out) (TYPE[ANY]);

If you run into a situation where a function or method is returning a pointer to an array, your best bet is to write your own version of the function to be wrapped, either with %extend for the case of class methods or %ignore and %rename for the case of functions.


Other Common Types: bool

Note that the C++ type bool is not supported in the list in the Available Typemaps section. NumPy bools are a single byte, while the C++ bool is four bytes (at least on my system). Therefore:

    %numpy_typemaps(bool, NPY_BOOL, int)

will result in typemaps that will produce code that references improper data lengths. You can implement the following macro expansion:

    %numpy_typemaps(bool, NPY_UINT, int)

to fix the data length problem, and Input Arrays will work fine, but In-Place Arrays might fail type-checking.


Other Common Types: complex

Typemap conversions for complex floating-point types are also not supported automatically. This is because python and NumPy are written in C, which does not have native complex types. Both python and NumPy implement their own (essentially equivalent) struct definitions for complex variables:

    /* Python */
    typedef struct {double real; double imag;} Py_complex;

    /* NumPy */
    typedef struct {float  real, imag;} npy_cfloat;
    typedef struct {double real, imag;} npy_cdouble;

We could have implemented:

    %numpy_typemaps(Py_complex , NPY_CDOUBLE, int)
    %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int)
    %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int)

which would have provided automatic type conversions for arrays of type Py_complex, npy_cfloat and npy_cdouble. However, it seemed unlikely that there would be any independent (non-python, non-NumPy) application code that people would be using SWIG to generate a python interface to that also used these definitions for complex types. More likely, these application codes will define their own complex types, or in the case of C++, use std::complex. Assuming these data structures are compatible with python and NumPy complex types, %numpy_typemaps expansions as above (with the user's complex type substituted for the first argument) should work.

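For example, for C++ code that uses std::complex, and assuming (as discussed above) that its memory layout is compatible with the NumPy complex types, the expansions might look like:

    %numpy_typemaps(std::complex<float> , NPY_CFLOAT , int)
    %numpy_typemaps(std::complex<double>, NPY_CDOUBLE, int)
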

NumPy Array Scalars and SWIG

SWIG has sophisticated type checking for numerical types. For example, if your C/C++ routine expects an integer as input, the code generated by SWIG will check for both python integers and python long integers, and raise an overflow error if the provided python integer is too big to cast down to a C integer. With the introduction of NumPy array scalars into your python code, you might conceivably extract an integer from a NumPy array and attempt to pass this to a SWIG-wrapped C/C++ function that expects an int, but the SWIG type checking will not recognize the NumPy array scalar as an integer. (Often, this does in fact work -- it depends on whether NumPy recognizes the integer type you are using as inheriting from the python integer type on the platform you are using. Sometimes, this means that code that works on a 32-bit machine will fail on a 64-bit machine.)

If you get a python error that looks like the following:

    TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int'

and the argument you are passing is an integer extracted from a NumPy array, then you have stumbled upon this problem. The solution is to modify the SWIG type conversion system to accept NumPy array scalars in addition to the standard integer types. Fortunately, this capability has been provided for you. Simply copy the file:

    pyfragments.swg

to the working build directory for your project, and this problem will be fixed. It is suggested that you do this anyway, as it only increases the capabilities of your python interface.


Why is There a Second File?

The SWIG type checking and conversion system is a complicated combination of C macros, SWIG macros, SWIG typemaps and SWIG fragments. Fragments are a way to conditionally insert code into your wrapper file if it is needed, and not insert it if not needed. If multiple typemaps require the same fragment, the fragment only gets inserted into your wrapper code once.

There is a fragment for converting a python integer to a C long. There is a different fragment that converts a python integer to a C int, which calls the routine defined in the long fragment. We can make the changes we want here by changing the definition for the long fragment. SWIG determines the active definition for a fragment using a "first come, first served" system. That is, we need to define the fragment for long conversions prior to SWIG doing it internally. SWIG allows us to do this by putting our fragment definitions in the file pyfragments.swg. If we were to put the new fragment definitions in numpy.i, they would be ignored.


Helper Functions

The numpy.i file contains several macros and routines that it uses internally to build its typemaps. However, these functions may be useful elsewhere in your interface file. These macros and routines are implemented as fragments, which are described briefly in the previous section. If you try to use one or more of the following macros or functions, but your compiler complains that it does not recognize the symbol, then you need to force these fragments to appear in your code using:

    %fragment("NumPy_Fragments");

in your SWIG interface file.


Macros

is_array(a)
    Evaluates as true if a is non-NULL and can be cast to a PyArrayObject*.

array_type(a)
    Evaluates to the integer data type code of a, assuming a can be cast to a PyArrayObject*.

array_numdims(a)
    Evaluates to the integer number of dimensions of a, assuming a can be cast to a PyArrayObject*.

array_dimensions(a)
    Evaluates to an array of type npy_intp and length array_numdims(a), giving the lengths of all of the dimensions of a, assuming a can be cast to a PyArrayObject*.

array_size(a,i)
    Evaluates to the i-th dimension size of a, assuming a can be cast to a PyArrayObject*.

array_data(a)
    Evaluates to a pointer of type void* that points to the data buffer of a, assuming a can be cast to a PyArrayObject*.

array_is_contiguous(a)
    Evaluates as true if a is a contiguous array. Equivalent to (PyArray_ISCONTIGUOUS(a)).

array_is_native(a)
    Evaluates as true if the data buffer of a uses native byte order. Equivalent to (PyArray_ISNOTSWAPPED(a)).

array_is_fortran(a)
    Evaluates as true if a is FORTRAN ordered.

Routines

pytype_string()

Return type: char*

Arguments:

  • PyObject* py_obj, a general python object.

Return a string describing the type of py_obj.


typecode_string()

Return type: char*

Arguments:

  • int typecode, a NumPy integer typecode.

Return a string describing the type corresponding to the NumPy typecode.


type_match()

Return type: int

Arguments:

  • int actual_type, the NumPy typecode of a NumPy array.
  • int desired_type, the desired NumPy typecode.

Make sure that actual_type is compatible with desired_type. For example, this allows character and byte types, or int and long types, to match. This is now equivalent to PyArray_EquivTypenums().


obj_to_array_no_conversion()

Return type: PyArrayObject*

Arguments:

  • PyObject* input, a general python object.
  • int typecode, the desired NumPy typecode.

Cast input to a PyArrayObject* if legal, and ensure that it is of type typecode. If input cannot be cast, or the typecode is wrong, set a python error and return NULL.


obj_to_array_allow_conversion()

Return type: PyArrayObject*

Arguments:

  • PyObject* input, a general python object.
  • int typecode, the desired NumPy typecode of the resulting array.
  • int* is_new_object, returns a value of 0 if no conversion performed, else 1.

Convert input to a NumPy array with the given typecode. On success, return a valid PyArrayObject* with the correct type. On failure, the python error string will be set and the routine returns NULL.


make_contiguous()

Return type: PyArrayObject*

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • int* is_new_object, returns a value of 0 if no conversion performed, else 1.
  • int min_dims, minimum allowable dimensions.
  • int max_dims, maximum allowable dimensions.

Check to see if ary is contiguous. If so, return the input pointer and flag it as not a new object. If it is not contiguous, create a new PyArrayObject* using the original data, flag it as a new object and return the pointer.


obj_to_array_contiguous_allow_conversion()

Return type: PyArrayObject*

Arguments:

  • PyObject* input, a general python object.
  • int typecode, the desired NumPy typecode of the resulting array.
  • int* is_new_object, returns a value of 0 if no conversion performed, else 1.

Convert input to a contiguous PyArrayObject* of the specified type. If the input object is not a contiguous PyArrayObject*, a new one will be created and the new object flag will be set.


require_contiguous()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.

Test whether ary is contiguous. If so, return 1. Otherwise, set a python error and return 0.


require_native()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.

Require that ary is not byte-swapped. If the array is not byte-swapped, return 1. Otherwise, set a python error and return 0.


require_dimensions()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • int exact_dimensions, the desired number of dimensions.

Require ary to have a specified number of dimensions. If the array has the specified number of dimensions, return 1. Otherwise, set a python error and return 0.


require_dimensions_n()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • int* exact_dimensions, an array of integers representing acceptable numbers of dimensions.
  • int n, the length of exact_dimensions.

Require ary to have one of a list of specified numbers of dimensions. If the array has one of the specified numbers of dimensions, return 1. Otherwise, set the python error string and return 0.


require_size()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • npy_intp* size, an array representing the desired lengths of each dimension.
  • int n, the length of size.

Require ary to have a specified shape. If the array has the specified shape, return 1. Otherwise, set the python error string and return 0.


require_fortran()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.

Require the given PyArrayObject to be FORTRAN ordered. If the PyArrayObject is already FORTRAN ordered, do nothing. Else, set the FORTRAN ordering flag and recompute the strides.

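As an illustration of how these routines and the macros above can be used outside of numpy.i, the following is a rough sketch of a custom two-dimensional input typemap modeled on the ones in numpy.i; the names MY_ARRAY2, MY_DIM1 and MY_DIM2 are placeholders, and the error handling is simplified:

    %typemap(in, fragment="NumPy_Fragments")
      (double* MY_ARRAY2, int MY_DIM1, int MY_DIM2)
      (PyArrayObject* array=NULL, int is_new_object=0)
    {
      /* Convert the python input to a contiguous 2D array of double */
      array = obj_to_array_contiguous_allow_conversion($input, NPY_DOUBLE,
                                                       &is_new_object);
      if (!array || !require_dimensions(array, 2) || !require_native(array))
        SWIG_fail;
      $1 = (double*) array_data(array);
      $2 = (int) array_size(array, 0);
      $3 = (int) array_size(array, 1);
    }

    %typemap(freearg) (double* MY_ARRAY2, int MY_DIM1, int MY_DIM2)
    {
      /* Release the temporary array if a conversion was performed */
      if (is_new_object$argnum && array$argnum) { Py_DECREF(array$argnum); }
    }
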

Beyond the Provided Typemaps

There are many C or C++ array/NumPy array situations not covered by a simple %include "numpy.i" and subsequent %apply directives.


A Common Example

Consider a reasonable prototype for a dot product function:

    double dot(int len, double* vec1, double* vec2);

The python interface that we want is:

    def dot(vec1, vec2):
        """
        dot(PyObject,PyObject) -> double
        """

The problem here is that there is one dimension argument and two array arguments, and our typemaps are set up for dimensions that apply to a single array (in fact, SWIG does not provide a mechanism for associating len with vec2 using a typemap that takes two python input arguments). The recommended solution is the following:

    %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1),
                                          (int len2, double* vec2)}
    %rename (dot) my_dot;
    %exception my_dot {
        $action
        if (PyErr_Occurred()) SWIG_fail;
    }
    %inline %{
    double my_dot(int len1, double* vec1, int len2, double* vec2) {
        if (len1 != len2) {
            PyErr_Format(PyExc_ValueError,
                         "Arrays of lengths (%d,%d) given",
                         len1, len2);
            return 0.0;
        }
        return dot(len1, vec1, vec2);
    }
    %}

If the header file that contains the prototype for double dot() also contains other prototypes that you want to wrap, so that you need to %include this header file, then you will also need a %ignore dot; directive, placed after the %rename and before the %include directives. Or, if the function in question is a class method, you will want to use %extend rather than %inline in addition to %ignore.

A note on error handling: Note that my_dot returns a double but that it can also raise a python error. The resulting wrapper function will return a python float representation of 0.0 when the vector lengths do not match. Since this is not NULL, the python interpreter will not know to check for an error. For this reason, we add the %exception directive above for my_dot to get the behavior we want (note that $action is a macro that gets expanded to a valid call to my_dot). In general, you will probably want to write a SWIG macro to perform this task.

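A minimal sketch of such a macro (the name %numpy_exception is made up for this example) might be:

    %define %numpy_exception(FUNCTION)
    %exception FUNCTION {
        $action
        if (PyErr_Occurred()) SWIG_fail;
    }
    %enddef

    %numpy_exception(my_dot)

As with the explicit %exception directive, the macro must be invoked before SWIG sees the declaration of the function it applies to.
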

Other Situations

There are other wrapping situations that you may encounter in which numpy.i may be helpful.

  • In some situations, it is possible that you could use the %numpy_typemaps macro to implement typemaps for your own types. See the Other Common Types: bool or Other Common Types: complex sections for examples. Another situation is if your dimensions are of a type other than int (say long for example):

        %numpy_typemaps(double, NPY_DOUBLE, long)

  • You can use the code in numpy.i to write your own typemaps. For example, if you had a four-dimensional array as a function argument, you could cut-and-paste the appropriate three-dimensional typemaps into your interface file. The modifications for the fourth dimension would be trivial.

  • Sometimes, the best approach is to use the %extend directive to define new methods for your classes (or overload existing ones) that take a PyObject* (that either is or can be converted to a PyArrayObject*) instead of a pointer to a buffer. In this case, the helper routines in numpy.i can be very useful; a sketch of this approach follows this list.

  • Writing typemaps can be a bit nonintuitive. If you have specific questions about writing SWIG typemaps for NumPy, the developers of numpy.i do monitor the Numpy-discussion and Swig-user mail lists.
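The following is a rough sketch of the %extend approach mentioned above. The class ViewerClass and its set_data() method are hypothetical, the helper fragments must be present in the wrapper (see Helper Functions), and error propagation would still need an %exception directive like the one shown earlier:

    %extend ViewerClass {
      void set_data(PyObject* obj) {
        int is_new_object = 0;
        /* Use the numpy.i helper routines to obtain a contiguous array of double */
        PyArrayObject* array =
          obj_to_array_contiguous_allow_conversion(obj, NPY_DOUBLE, &is_new_object);
        if (!array || !require_dimensions(array, 1)) return;
        $self->set_data((double*) array_data(array), (int) array_size(array, 0));
        if (is_new_object) { Py_DECREF(array); }
      }
    }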

A Final Note

When you use the %apply directive, as is usually necessary to use numpy.i, it will remain in effect until you tell SWIG that it shouldn't be. If the arguments to the functions or methods that you are wrapping have common names, such as length or vector, these typemaps may get applied in situations you do not expect or want. Therefore, it is always a good idea to add a %clear directive after you are done with a specific typemap:

    %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)}
    %include "my_header.h"
    %clear (double* vector, int length);

In general, you should target these typemap signatures specifically where you want them, and then clear them after you are done.


Summary

Out of the box, numpy.i provides typemaps that support conversion between NumPy arrays and C arrays:

  • That can be one of 12 different scalar types: signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, long long, unsigned long long, float and double.

  • That support 41 different argument signatures for each data type, including:

    • One-dimensional, two-dimensional and three-dimensional arrays.
    • Input-only, in-place, argout and argoutview behavior.
    • Hard-coded dimensions, data-buffer-then-dimensions specification, and dimensions-then-data-buffer specification.
    • Both C-ordering ("last dimension fastest") or FORTRAN-ordering ("first dimension fastest") support for 2D and 3D arrays.

The numpy.i interface file also provides additional tools for wrapper developers, including:

  • A SWIG macro (%numpy_typemaps) with three arguments for implementing the 41 argument signatures for the user's choice of (1) C data type, (2) NumPy data type (assuming they match), and (3) dimension type.

  • Nine C macros and 13 C functions that can be used to write specialized typemaps, extensions, or inlined functions that handle cases not covered by the provided typemaps.

Acknowledgements

Many people have worked to glue SWIG and NumPy together (as well as SWIG and the predecessors of NumPy, Numeric and numarray). The effort to standardize this work into numpy.i began at the 2005 SciPy Conference with a conversation between Fernando Perez and myself. Fernando collected helper functions and typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael Sanner. Sebastian Hasse and Georg Holzmann have also provided additional error checking and use cases. The work of these contributors has made this end result possible.

-
-
- - - diff --git a/numpy/doc/swig/doc/numpy_swig.pdf b/numpy/doc/swig/doc/numpy_swig.pdf deleted file mode 100644 index 1d4642cf7..000000000 Binary files a/numpy/doc/swig/doc/numpy_swig.pdf and /dev/null differ diff --git a/numpy/doc/swig/doc/numpy_swig.txt b/numpy/doc/swig/doc/numpy_swig.txt deleted file mode 100644 index bfde018bf..000000000 --- a/numpy/doc/swig/doc/numpy_swig.txt +++ /dev/null @@ -1,950 +0,0 @@ -========================================== - numpy.i: a SWIG Interface File for NumPy -========================================== - -:Author: Bill Spotz -:Institution: Sandia National Laboratories -:Date: 1 December, 2007 - -.. contents:: - -Introduction -============ - -The Simple Wrapper and Interface Generator (or `SWIG -`_) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -`SWIG`_ can parse header files, and using only the code prototypes, -create an interface to the target language. But `SWIG`_ is not -omnipotent. For example, it cannot know from the prototype:: - - double rms(double* seq, int n); - -what exactly ``seq`` is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? `SWIG`_ cannot determine these details, -and does not attempt to do so. - -If we designed ``rms``, we probably made it a routine that takes an -input-only array of length ``n`` of ``double`` values called ``seq`` -and returns the root mean square. The default behavior of `SWIG`_, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended. - -For `python `_, the preferred way of handling -contiguous (or technically, *strided*) blocks of homogeneous data is -with the module `NumPy `_, which provides full -object-oriented access to multidimensial arrays of data. Therefore, -the most logical `python`_ interface for the ``rms`` function would be -(including doc string):: - - def rms(seq): - """ - rms: return the root mean square of a sequence - rms(numpy.ndarray) -> double - rms(list) -> double - rms(tuple) -> double - """ - -where ``seq`` would be a `NumPy`_ array of ``double`` values, and its -length ``n`` would be extracted from ``seq`` internally before being -passed to the C routine. Even better, since `NumPy`_ supports -construction of arrays from arbitrary `python`_ sequences, ``seq`` -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a ``double``) and the wrapper code would -internally convert it to a `NumPy`_ array before extracting its data -and length. - -`SWIG`_ allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use ``numpy.i``, a `SWIG`_ interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the ``rms`` function prototype defined above was in a header file -named ``rms.h``. To obtain the `python`_ interface discussed above, -your `SWIG`_ interface file would need the following:: - - %{ - #define SWIG_FILE_WITH_INIT - #include "rms.h" - %} - - %include "numpy.i" - - %init %{ - import_array(); - %} - - %apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)}; - %include "rms.h" - -Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. 
We will refer to such lists as -*signatures*. One of the many typemaps defined by ``numpy.i`` is used -above and has the signature ``(double* IN_ARRAY1, int DIM1)``. The -argument names are intended to suggest that the ``double*`` argument -is an input array of one dimension and that the ``int`` represents -that dimension. This is precisely the pattern in the ``rms`` -prototype. - -Most likely, no actual prototypes to be wrapped will have the argument -names ``IN_ARRAY1`` and ``DIM1``. We use the ``%apply`` directive to -apply the typemap for one-dimensional input arrays of type ``double`` -to the actual prototype used by ``rms``. Using ``numpy.i`` -effectively, therefore, requires knowing what typemaps are available -and what they do. - -A `SWIG`_ interface file that includes the `SWIG`_ directives given -above will produce wrapper code that looks something like:: - - 1 PyObject *_wrap_rms(PyObject *args) { - 2 PyObject *resultobj = 0; - 3 double *arg1 = (double *) 0 ; - 4 int arg2 ; - 5 double result; - 6 PyArrayObject *array1 = NULL ; - 7 int is_new_object1 = 0 ; - 8 PyObject * obj0 = 0 ; - 9 - 10 if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail; - 11 { - 12 array1 = obj_to_array_contiguous_allow_conversion( - 13 obj0, NPY_DOUBLE, &is_new_object1); - 14 npy_intp size[1] = { - 15 -1 - 16 }; - 17 if (!array1 || !require_dimensions(array1, 1) || - 18 !require_size(array1, size, 1)) SWIG_fail; - 19 arg1 = (double*) array1->data; - 20 arg2 = (int) array1->dimensions[0]; - 21 } - 22 result = (double)rms(arg1,arg2); - 23 resultobj = SWIG_From_double((double)(result)); - 24 { - 25 if (is_new_object1 && array1) Py_DECREF(array1); - 26 } - 27 return resultobj; - 28 fail: - 29 { - 30 if (is_new_object1 && array1) Py_DECREF(array1); - 31 } - 32 return NULL; - 33 } - -The typemaps from ``numpy.i`` are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the ``rms`` -function. From the format string ``"O:rms"``, we can see that the -argument list is expected to be a single `python`_ object (specified -by the ``O`` before the colon) and whose pointer is stored in -``obj0``. A number of functions, supplied by ``numpy.i``, are called -to make and check the (possible) conversion from a generic `python`_ -object to a `NumPy`_ array. These functions are explained in the -section `Helper Functions`_, but hopefully their names are -self-explanatory. At line 12 we use ``obj0`` to construct a `NumPy`_ -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed. - -This code has a significant amount of error handling. Note the -``SWIG_fail`` is a macro for ``goto fail``, refering to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the `NumPy`_ array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30. - -Note that if the C function signature was in a different order:: - - double rms(int n, double* seq); - -that `SWIG`_ would not match the typemap signature given above with -the argument list for ``rms``. 
Fortunately, ``numpy.i`` has a set of -typemaps with the data pointer given last:: - - %apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)}; - -This simply has the effect of switching the definitions of ``arg1`` -and ``arg2`` in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20. - -Using numpy.i -============= - -The ``numpy.i`` file is currently located in the ``numpy/docs/swig`` -sub-directory under the ``numpy`` installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. If it is ever adopted by `SWIG`_ developers, then it -will be installed in a standard place where `SWIG`_ can find it. - -A simple module that only uses a single `SWIG`_ interface file should -include the following:: - - %{ - #define SWIG_FILE_WITH_INIT - %} - %include "numpy.i" - %init %{ - import_array(); - %} - -Within a compiled `python`_ module, ``import_array()`` should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should ``#define SWIG_FILE_WITH_INIT`` or call -``import_array()``. Or, this initialization call could be in a -wrapper file generated by `SWIG`_ from an interface file that has the -``%init`` block as above. If this is the case, and you have more than -one `SWIG`_ interface file, then only one interface file should -``#define SWIG_FILE_WITH_INIT`` and call ``import_array()``. - -Available Typemaps -================== - -The typemap directives provided by ``numpy.i`` for arrays of different -data types, say ``double`` and ``int``, and dimensions of different -types, say ``int`` or ``long``, are identical to one another except -for the C and `NumPy`_ type specifications. The typemaps are -therefore implemented (typically behind the scenes) via a macro:: - - %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -that can be invoked for appropriate ``(DATA_TYPE, DATA_TYPECODE, -DIM_TYPE)`` triplets. For example:: - - %numpy_typemaps(double, NPY_DOUBLE, int) - %numpy_typemaps(int, NPY_INT , int) - -The ``numpy.i`` interface file uses the ``%numpy_typemaps`` macro to -implement typemaps for the following C data types and ``int`` -dimension types: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -In the following descriptions, we reference a generic ``DATA_TYPE``, which -could be any of the C data types listed above, and ``DIM_TYPE`` which -should be one of the many types of integers. - -The typemap signatures are largely differentiated on the name given to -the buffer pointer. Names with ``FARRAY`` are for FORTRAN-ordered -arrays, and names with ``ARRAY`` are for C-ordered (or 1D arrays). - -Input Arrays ------------- - -Input arrays are defined as arrays of data that are passed into a -routine but are not altered in-place or returned to the user. The -`python`_ input array is therefore allowed to be almost any `python`_ -sequence (such as a list) that can be converted to the requested type -of array. 
The input array signatures are - -1D: - - * ``( DATA_TYPE IN_ARRAY1[ANY] )`` - * ``( DATA_TYPE* IN_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* IN_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )`` - * ``( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )`` - * ``( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )`` - -The first signature listed, ``( DATA_TYPE IN_ARRAY[ANY] )`` is for -one-dimensional arrays with hard-coded dimensions. Likewise, -``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` is for two-dimensional arrays -with hard-coded dimensions, and similarly for three-dimensional. - -In-Place Arrays ---------------- - -In-place arrays are defined as arrays that are modified in-place. The -input values may or may not be used, but the values at the time the -function returns are significant. The provided `python`_ argument -must therefore be a `NumPy`_ array of the required type. The in-place -signatures are - -1D: - - * ``( DATA_TYPE INPLACE_ARRAY1[ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )`` - * ``( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )`` - * ``( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )`` - -These typemaps now check to make sure that the ``INPLACE_ARRAY`` -arguments use native byte ordering. If not, an exception is raised. - -Argout Arrays -------------- - -Argout arrays are arrays that appear in the input arguments in C, but -are in fact output arrays. This pattern occurs often when there is -more than one output variable and the single return argument is -therefore not sufficient. In `python`_, the convential way to return -multiple arguments is to pack them into a sequence (tuple, list, etc.) -and return the sequence. This is what the argout typemaps do. If a -wrapped function that uses these argout typemaps has more than one -return argument, they are packed into a tuple or list, depending on -the version of `python`_. The `python`_ user does not pass these -arrays in, they simply get returned. For the case where a dimension -is specified, the python user must provide that dimension as an -argument. The argout signatures are - -1D: - - * ``( DATA_TYPE ARGOUT_ARRAY1[ANY] )`` - * ``( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )`` - -3D: - - * ``( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )`` - -These are typically used in situations where in C/C++, you would -allocate a(n) array(s) on the heap, and call the function to fill the -array(s) values. 
In `python`_, the arrays are allocated for you and -returned as new array objects. - -Note that we support ``DATA_TYPE*`` argout typemaps in 1D, but not 2D -or 3D. This is because of a quirk with the `SWIG`_ typemap syntax and -cannot be avoided. Note that for these types of 1D typemaps, the -`python`_ function will take a single argument representing ``DIM1``. - -Argoutview Arrays ------------------ - -Argoutview arrays are for when your C code provides you with a view of -its internal data and does not require any memory to be allocated by -the user. This can be dangerous. There is almost no way to guarantee -that the internal data from the C code will remain in existence for -the entire lifetime of the `NumPy`_ array that encapsulates it. If -the user destroys the object that provides the view of the data before -destroying the `NumPy`_ array, then using that array my result in bad -memory references or segmentation faults. Nevertheless, there are -situations, working with large data sets, where you simply have no -other choice. - -The C code to be wrapped for argoutview arrays are characterized by -pointers: pointers to the dimensions and double pointers to the data, -so that these values can be passed back to the user. The argoutview -typemap signatures are therefore - -1D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )`` - * ``( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)`` - -Note that arrays with hard-coded dimensions are not supported. These -cannot follow the double pointer signatures of these typemaps. - -Output Arrays -------------- - -The ``numpy.i`` interface file does not support typemaps for output -arrays, for several reasons. First, C/C++ return arguments are -limited to a single value. This prevents obtaining dimension -information in a general way. Second, arrays with hard-coded lengths -are not permitted as return arguments. In other words:: - - double[3] newVector(double x, double y, double z); - -is not legal C/C++ syntax. Therefore, we cannot provide typemaps of -the form:: - - %typemap(out) (TYPE[ANY]); - -If you run into a situation where a function or method is returning a -pointer to an array, your best bet is to write your own version of the -function to be wrapped, either with ``%extend`` for the case of class -methods or ``%ignore`` and ``%rename`` for the case of functions. - -Other Common Types: bool ------------------------- - -Note that C++ type ``bool`` is not supported in the list in the -`Available Typemaps`_ section. NumPy bools are a single byte, while -the C++ ``bool`` is four bytes (at least on my system). Therefore:: - - %numpy_typemaps(bool, NPY_BOOL, int) - -will result in typemaps that will produce code that reference -improper data lengths. 
You can implement the following macro -expansion:: - - %numpy_typemaps(bool, NPY_UINT, int) - -to fix the data length problem, and `Input Arrays`_ will work fine, -but `In-Place Arrays`_ might fail type-checking. - -Other Common Types: complex ---------------------------- - -Typemap conversions for complex floating-point types is also not -supported automatically. This is because `python`_ and `NumPy`_ are -written in C, which does not have native complex types. Both -`python`_ and `NumPy`_ implement their own (essentially equivalent) -``struct`` definitions for complex variables:: - - /* Python */ - typedef struct {double real; double imag;} Py_complex; - - /* NumPy */ - typedef struct {float real, imag;} npy_cfloat; - typedef struct {double real, imag;} npy_cdouble; - -We could have implemented:: - - %numpy_typemaps(Py_complex , NPY_CDOUBLE, int) - %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int) - %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int) - -which would have provided automatic type conversions for arrays of -type ``Py_complex``, ``npy_cfloat`` and ``npy_cdouble``. However, it -seemed unlikely that there would be any independent (non-`python`_, -non-`NumPy`_) application code that people would be using `SWIG`_ to -generate a `python`_ interface to, that also used these definitions -for complex types. More likely, these application codes will define -their own complex types, or in the case of C++, use ``std::complex``. -Assuming these data structures are compatible with `python`_ and -`NumPy`_ complex types, ``%numpy_typemap`` expansions as above (with -the user's complex type substituted for the first argument) should -work. - -NumPy Array Scalars and SWIG -============================ - -`SWIG`_ has sophisticated type checking for numerical types. For -example, if your C/C++ routine expects an integer as input, the code -generated by `SWIG`_ will check for both `python`_ integers and -`python`_ long integers, and raise an overflow error if the provided -`python`_ integer is too big to cast down to a C integer. With the -introduction of `NumPy`_ scalar arrays into your `python`_ code, you -might conceivably extract an integer from a `NumPy`_ array and attempt -to pass this to a `SWIG`_-wrapped C/C++ function that expects an -``int``, but the `SWIG`_ type checking will not recognize the `NumPy`_ -array scalar as an integer. (Often, this does in fact work -- it -depends on whether `NumPy`_ recognizes the integer type you are using -as inheriting from the `python`_ integer type on the platform you are -using. Sometimes, this means that code that works on a 32-bit machine -will fail on a 64-bit machine.) - -If you get a `python`_ error that looks like the following:: - - TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int' - -and the argument you are passing is an integer extracted from a -`NumPy`_ array, then you have stumbled upon this problem. The -solution is to modify the `SWIG`_ type conversion system to accept -`Numpy`_ array scalars in addition to the standard integer types. -Fortunately, this capabilitiy has been provided for you. Simply copy -the file:: - - pyfragments.swg - -to the working build directory for you project, and this problem will -be fixed. It is suggested that you do this anyway, as it only -increases the capabilities of your `python`_ interface. - -Why is There a Second File? 
---------------------------- - -The `SWIG`_ type checking and conversion system is a complicated -combination of C macros, `SWIG`_ macros, `SWIG`_ typemaps and `SWIG`_ -fragments. Fragments are a way to conditionally insert code into your -wrapper file if it is needed, and not insert it if not needed. If -multiple typemaps require the same fragment, the fragment only gets -inserted into your wrapper code once. - -There is a fragment for converting a `python`_ integer to a C -``long``. There is a different fragment that converts a `python`_ -integer to a C ``int``, that calls the rountine defined in the -``long`` fragment. We can make the changes we want here by changing -the definition for the ``long`` fragment. `SWIG`_ determines the -active definition for a fragment using a "first come, first served" -system. That is, we need to define the fragment for ``long`` -conversions prior to `SWIG`_ doing it internally. `SWIG`_ allows us -to do this by putting our fragment definitions in the file -``pyfragments.swg``. If we were to put the new fragment definitions -in ``numpy.i``, they would be ignored. - -Helper Functions -================ - -The ``numpy.i`` file containes several macros and routines that it -uses internally to build its typemaps. However, these functions may -be useful elsewhere in your interface file. These macros and routines -are implemented as fragments, which are described briefly in the -previous section. If you try to use one or more of the following -macros or functions, but your compiler complains that it does not -recognize the symbol, then you need to force these fragments to appear -in your code using:: - - %fragment("NumPy_Fragments"); - -in your `SWIG`_ interface file. - -Macros ------- - - **is_array(a)** - Evaluates as true if ``a`` is non-``NULL`` and can be cast to a - ``PyArrayObject*``. - - **array_type(a)** - Evaluates to the integer data type code of ``a``, assuming ``a`` can - be cast to a ``PyArrayObject*``. - - **array_numdims(a)** - Evaluates to the integer number of dimensions of ``a``, assuming - ``a`` can be cast to a ``PyArrayObject*``. - - **array_dimensions(a)** - Evaluates to an array of type ``npy_intp`` and length - ``array_numdims(a)``, giving the lengths of all of the dimensions - of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_size(a,i)** - Evaluates to the ``i``-th dimension size of ``a``, assuming ``a`` - can be cast to a ``PyArrayObject*``. - - **array_data(a)** - Evaluates to a pointer of type ``void*`` that points to the data - buffer of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_is_contiguous(a)** - Evaluates as true if ``a`` is a contiguous array. Equivalent to - ``(PyArray_ISCONTIGUOUS(a))``. - - **array_is_native(a)** - Evaluates as true if the data buffer of ``a`` uses native byte - order. Equivalent to ``(PyArray_ISNOTSWAPPED(a))``. - - **array_is_fortran(a)** - Evaluates as true if ``a`` is FORTRAN ordered. - -Routines --------- - - **pytype_string()** - - Return type: ``char*`` - - Arguments: - - * ``PyObject* py_obj``, a general `python`_ object. - - Return a string describing the type of ``py_obj``. - - - **typecode_string()** - - Return type: ``char*`` - - Arguments: - - * ``int typecode``, a `NumPy`_ integer typecode. - - Return a string describing the type corresponding to the `NumPy`_ - ``typecode``. - - **type_match()** - - Return type: ``int`` - - Arguments: - - * ``int actual_type``, the `NumPy`_ typecode of a `NumPy`_ array. 
- - * ``int desired_type``, the desired `NumPy`_ typecode. - - Make sure that ``actual_type`` is compatible with - ``desired_type``. For example, this allows character and - byte types, or int and long types, to match. This is now - equivalent to ``PyArray_EquivTypenums()``. - - - **obj_to_array_no_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode. - - Cast ``input`` to a ``PyArrayObject*`` if legal, and ensure that - it is of type ``typecode``. If ``input`` cannot be cast, or the - ``typecode`` is wrong, set a `python`_ error and return ``NULL``. - - - **obj_to_array_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a `NumPy`_ array with the given ``typecode``. - On success, return a valid ``PyArrayObject*`` with the correct - type. On failure, the `python`_ error string will be set and the - routine returns ``NULL``. - - - **make_contiguous()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - * ``int min_dims``, minimum allowable dimensions. - - * ``int max_dims``, maximum allowable dimensions. - - Check to see if ``ary`` is contiguous. If so, return the input - pointer and flag it as not a new object. If it is not contiguous, - create a new ``PyArrayObject*`` using the original data, flag it - as a new object and return the pointer. - - - **obj_to_array_contiguous_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a contiguous ``PyArrayObject*`` of the - specified type. If the input object is not a contiguous - ``PyArrayObject*``, a new one will be created and the new object - flag will be set. - - - **require_contiguous()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Test whether ``ary`` is contiguous. If so, return 1. Otherwise, - set a `python`_ error and return 0. - - - **require_native()** - - Return type: ``int`` - - Arguments: - - * ``PyArray_Object* ary``, a `NumPy`_ array. - - Require that ``ary`` is not byte-swapped. If the array is not - byte-swapped, return 1. Otherwise, set a `python`_ error and - return 0. - - **require_dimensions()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int exact_dimensions``, the desired number of dimensions. - - Require ``ary`` to have a specified number of dimensions. If the - array has the specified number of dimensions, return 1. - Otherwise, set a `python`_ error and return 0. - - - **require_dimensions_n()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int* exact_dimensions``, an array of integers representing - acceptable numbers of dimensions. - - * ``int n``, the length of ``exact_dimensions``. - - Require ``ary`` to have one of a list of specified number of - dimensions. 
If the array has one of the specified number of - dimensions, return 1. Otherwise, set the `python`_ error string - and return 0. - - - **require_size()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``npy_int* size``, an array representing the desired lengths of - each dimension. - - * ``int n``, the length of ``size``. - - Require ``ary`` to have a specified shape. If the array has the - specified shape, return 1. Otherwise, set the `python`_ error - string and return 0. - - - **require_fortran()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Require the given ``PyArrayObject`` to to be FORTRAN ordered. If - the the ``PyArrayObject`` is already FORTRAN ordered, do nothing. - Else, set the FORTRAN ordering flag and recompute the strides. - - -Beyond the Provided Typemaps -============================ - -There are many C or C++ array/`NumPy`_ array situations not covered by -a simple ``%include "numpy.i"`` and subsequent ``%apply`` directives. - -A Common Example ----------------- - -Consider a reasonable prototype for a dot product function:: - - double dot(int len, double* vec1, double* vec2); - -The `python`_ interface that we want is:: - - def dot(vec1, vec2): - """ - dot(PyObject,PyObject) -> double - """ - -The problem here is that there is one dimension argument and two array -arguments, and our typemaps are set up for dimensions that apply to a -single array (in fact, `SWIG`_ does not provide a mechanism for -associating ``len`` with ``vec2`` that takes two `python`_ input -arguments). The recommended solution is the following:: - - %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1), - (int len2, double* vec2)} - %rename (dot) my_dot; - %exception my_dot { - $action - if (PyErr_Occurred()) SWIG_fail; - } - %inline %{ - double my_dot(int len1, double* vec1, int len2, double* vec2) { - if (len1 != len2) { - PyErr_Format(PyExc_ValueError, - "Arrays of lengths (%d,%d) given", - len1, len2); - return 0.0; - } - return dot(len1, vec1, vec2); - } - %} - -If the header file that contains the prototype for ``double dot()`` -also contains other prototypes that you want to wrap, so that you need -to ``%include`` this header file, then you will also need a ``%ignore -dot;`` directive, placed after the ``%rename`` and before the -``%include`` directives. Or, if the function in question is a class -method, you will want to use ``%extend`` rather than ``%inline`` in -addition to ``%ignore``. - -**A note on error handling:** Note that ``my_dot`` returns a -``double`` but that it can also raise a `python`_ error. The -resulting wrapper function will return a `python`_ float -representation of 0.0 when the vector lengths do not match. Since -this is not ``NULL``, the `python`_ interpreter will not know to check -for an error. For this reason, we add the ``%exception`` directive -above for ``my_dot`` to get the behavior we want (note that -``$action`` is a macro that gets expanded to a valid call to -``my_dot``). In general, you will probably want to write a `SWIG`_ -macro to perform this task. - -Other Situations ----------------- - -There are other wrapping situations in which ``numpy.i`` may be -helpful when you encounter them. - - * In some situations, it is possible that you could use the - ``%numpy_templates`` macro to implement typemaps for your own - types. See the `Other Common Types: bool`_ or `Other Common - Types: complex`_ sections for examples. 
Another situation is if - your dimensions are of a type other than ``int`` (say ``long`` for - example):: - - %numpy_typemaps(double, NPY_DOUBLE, long) - - * You can use the code in ``numpy.i`` to write your own typemaps. - For example, if you had a four-dimensional array as a function - argument, you could cut-and-paste the appropriate - three-dimensional typemaps into your interface file. The - modifications for the fourth dimension would be trivial. - - * Sometimes, the best approach is to use the ``%extend`` directive - to define new methods for your classes (or overload existing ones) - that take a ``PyObject*`` (that either is or can be converted to a - ``PyArrayObject*``) instead of a pointer to a buffer. In this - case, the helper routines in ``numpy.i`` can be very useful. - - * Writing typemaps can be a bit nonintuitive. If you have specific - questions about writing `SWIG`_ typemaps for `NumPy`_, the - developers of ``numpy.i`` do monitor the - `Numpy-discussion `_ and - `Swig-user `_ mail lists. - -A Final Note ------------- - -When you use the ``%apply`` directive, as is usually necessary to use -``numpy.i``, it will remain in effect until you tell `SWIG`_ that it -shouldn't be. If the arguments to the functions or methods that you -are wrapping have common names, such as ``length`` or ``vector``, -these typemaps may get applied in situations you do not expect or -want. Therefore, it is always a good idea to add a ``%clear`` -directive after you are done with a specific typemap:: - - %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)} - %include "my_header.h" - %clear (double* vector, int length); - -In general, you should target these typemap signatures specifically -where you want them, and then clear them after you are done. - -Summary -======= - -Out of the box, ``numpy.i`` provides typemaps that support conversion -between `NumPy`_ arrays and C arrays: - - * That can be one of 12 different scalar types: ``signed char``, - ``unsigned char``, ``short``, ``unsigned short``, ``int``, - ``unsigned int``, ``long``, ``unsigned long``, ``long long``, - ``unsigned long long``, ``float`` and ``double``. - - * That support 41 different argument signatures for each data type, - including: - - + One-dimensional, two-dimensional and three-dimensional arrays. - - + Input-only, in-place, argout and argoutview behavior. - - + Hard-coded dimensions, data-buffer-then-dimensions - specification, and dimensions-then-data-buffer specification. - - + Both C-ordering ("last dimension fastest") or FORTRAN-ordering - ("first dimension fastest") support for 2D and 3D arrays. - -The ``numpy.i`` interface file also provides additional tools for -wrapper developers, including: - - * A `SWIG`_ macro (``%numpy_typemaps``) with three arguments for - implementing the 41 argument signatures for the user's choice of - (1) C data type, (2) `NumPy`_ data type (assuming they match), and - (3) dimension type. - - * Nine C macros and 13 C functions that can be used to write - specialized typemaps, extensions, or inlined functions that handle - cases not covered by the provided typemaps. - -Acknowledgements -================ - -Many people have worked to glue `SWIG`_ and `NumPy`_ together (as well -as `SWIG`_ and the predecessors of `NumPy`_, Numeric and numarray). -The effort to standardize this work into ``numpy.i`` began at the 2005 -`SciPy `_ Conference with a conversation between -Fernando Perez and myself. 
Fernando collected helper functions and -typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael -Sanner. Sebastian Haase and Georg Holzmann have also provided -additional error checking and use cases. The work of these -contributors has made this end result possible. diff --git a/numpy/doc/swig/doc/testing.html b/numpy/doc/swig/doc/testing.html deleted file mode 100644 index 3622550df..000000000 --- a/numpy/doc/swig/doc/testing.html +++ /dev/null @@ -1,482 +0,0 @@ -Testing the numpy.i Typemaps
- - - diff --git a/numpy/doc/swig/doc/testing.pdf b/numpy/doc/swig/doc/testing.pdf deleted file mode 100644 index 9ffcf7575..000000000 Binary files a/numpy/doc/swig/doc/testing.pdf and /dev/null differ diff --git a/numpy/doc/swig/doc/testing.txt b/numpy/doc/swig/doc/testing.txt deleted file mode 100644 index bfd5218e8..000000000 --- a/numpy/doc/swig/doc/testing.txt +++ /dev/null @@ -1,173 +0,0 @@ -============================ -Testing the numpy.i Typemaps -============================ - -:Author: Bill Spotz -:Institution: Sandia National Laboratories -:Date: 6 April, 2007 - -.. contents:: - -Introduction -============ - -Writing tests for the ``numpy.i`` `SWIG `_ -interface file is a combinatorial headache. At present, 12 different -data types are supported, each with 23 different argument signatures, -for a total of 276 typemaps supported "out of the box". Each of these -typemaps, in turn, might require several unit tests in order to verify -expected behavior for both proper and improper inputs. Currently, -this results in 1,020 individual unit tests that are performed when -``make test`` is run in the ``numpy/docs/swig`` subdirectory. - -To facilitate this many similar unit tests, some high-level -programming techniques are employed, including C and `SWIG`_ macros, -as well as `python `_ inheritance. The -purpose of this document is to describe the testing infrastructure -employed to verify that the ``numpy.i`` typemaps are working as -expected. - -Testing Organization -==================== - -There are three indepedent testing frameworks supported, for one-, -two-, and three-dimensional arrays respectively. For one-dimensional -arrays, there are two C++ files, a header and a source, named:: - - Vector.h - Vector.cxx - -that contain prototypes and code for a variety of functions that have -one-dimensional arrays as function arguments. The file:: - - Vector.i - -is a `SWIG`_ interface file that defines a python module ``Vector`` -that wraps the functions in ``Vector.h`` while utilizing the typemaps -in ``numpy.i`` to correctly handle the C arrays. - -The ``Makefile`` calls ``swig`` to generate ``Vector.py`` and -``Vector_wrap.cxx``, and also executes the ``setup.py`` script that -compiles ``Vector_wrap.cxx`` and links together the extension module -``_Vector.so`` or ``_Vector.dylib``, depending on the platform. This -extension module and the proxy file ``Vector.py`` are both placed in a -subdirectory under the ``build`` directory. - -The actual testing takes place with a `python`_ script named:: - - testVector.py - -that uses the standard `python`_ library module ``unittest``, which -performs several tests of each function defined in ``Vector.h`` for -each data type supported. - -Two-dimensional arrays are tested in exactly the same manner. The -above description applies, but with ``Matrix`` substituted for -``Vector``. For three-dimensional tests, substitute ``Tensor`` for -``Vector``. For the descriptions that follow, we will reference the -``Vector`` tests, but the same information applies to ``Matrix`` and -``Tensor`` tests. - -The command ``make test`` will ensure that all of the test software is -built and then run all three test scripts. - -Testing Header Files -==================== - -``Vector.h`` is a C++ header file that defines a C macro called -``TEST_FUNC_PROTOS`` that takes two arguments: ``TYPE``, which is a -data type name such as ``unsigned int``; and ``SNAME``, which is a -short name for the same data type with no spaces, e.g. ``uint``. 
This -macro defines several function prototypes that have the prefix -``SNAME`` and have at least one argument that is an array of type -``TYPE``. Those functions that have return arguments return a -``TYPE`` value. - -``TEST_FUNC_PROTOS`` is then implemented for all of the data types -supported by ``numpy.i``: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -Testing Source Files -==================== - -``Vector.cxx`` is a C++ source file that implements compilable code -for each of the function prototypes specified in ``Vector.h``. It -defines a C macro ``TEST_FUNCS`` that has the same arguments and works -in the same way as ``TEST_FUNC_PROTOS`` does in ``Vector.h``. -``TEST_FUNCS`` is implemented for each of the 12 data types as above. - -Testing SWIG Interface Files -============================ - -``Vector.i`` is a `SWIG`_ interface file that defines python module -``Vector``. It follows the conventions for using ``numpy.i`` as -described in the `numpy.i documentation `_. It -defines a `SWIG`_ macro ``%apply_numpy_typemaps`` that has a single -argument ``TYPE``. It uses the `SWIG`_ directive ``%apply`` as -described in the `numpy.i documentation`_ to apply the provided -typemaps to the argument signatures found in ``Vector.h``. This macro -is then implemented for all of the data types supported by -``numpy.i``. It then does a ``%include "Vector.h"`` to wrap all of -the function prototypes in ``Vector.h`` using the typemaps in -``numpy.i``. - -Testing Python Scripts -====================== - -After ``make`` is used to build the testing extension modules, -``testVector.py`` can be run to execute the tests. As with other -scripts that use ``unittest`` to facilitate unit testing, -``testVector.py`` defines a class that inherits from -``unittest.TestCase``:: - - class VectorTestCase(unittest.TestCase): - -However, this class is not run directly. Rather, it serves as a base -class to several other python classes, each one specific to a -particular data type. The ``VectorTestCase`` class stores two strings -for typing information: - - **self.typeStr** - A string that matches one of the ``SNAME`` prefixes used in - ``Vector.h`` and ``Vector.cxx``. For example, ``"double"``. - - **self.typeCode** - A short (typically single-character) string that represents a - data type in numpy and corresponds to ``self.typeStr``. For - example, if ``self.typeStr`` is ``"double"``, then - ``self.typeCode`` should be ``"d"``. - -Each test defined by the ``VectorTestCase`` class extracts the python -function it is trying to test by accessing the ``Vector`` module's -dictionary:: - - length = Vector.__dict__[self.typeStr + "Length"] - -In the case of double precision tests, this will return the python -function ``Vector.doubleLength``. - -We then define a new test case class for each supported data type with -a short definition such as:: - - class doubleTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -Each of these 12 classes is collected into a ``unittest.TestSuite``, -which is then executed. Errors and failures are summed together and -returned as the exit argument. Any non-zero result indicates that at -least one test did not pass. 
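As an illustrative aside, the testing pattern described in ``testing.txt`` above can be sketched in a few lines of self-contained Python. This is only a sketch, not code from this patch: the compiled ``Vector`` extension is stood in for by a hypothetical ``types.SimpleNamespace`` carrying a ``doubleLength`` function, so the snippet runs without building the SWIG wrappers. The real test scripts look the wrapped functions up the same way and exit with the summed error and failure counts::

    import sys
    import types
    import unittest

    import numpy as np

    # Hypothetical stand-in for the SWIG-generated Vector module described above.
    Vector = types.SimpleNamespace(
        doubleLength=lambda vec: float(np.asarray(vec, dtype="d").shape[0])
    )

    class VectorTestCase(unittest.TestCase):
        """Base class; subclasses set typeStr/typeCode for one data type."""
        def __init__(self, methodName="runTest"):
            unittest.TestCase.__init__(self, methodName)
            self.typeStr  = "double"
            self.typeCode = "d"

        def testLength(self):
            # Look up the wrapped function by its SNAME prefix, e.g. "doubleLength".
            length = Vector.__dict__[self.typeStr + "Length"]
            self.assertEqual(length([1.0, 2.0, 3.0]), 3.0)

    class doubleTestCase(VectorTestCase):
        def __init__(self, methodName="runTest"):
            VectorTestCase.__init__(self, methodName)
            self.typeStr  = "double"
            self.typeCode = "d"

    if __name__ == "__main__":
        suite = unittest.TestSuite()
        suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(doubleTestCase))
        result = unittest.TextTestRunner(verbosity=1).run(suite)
        # Exit convention from testing.txt: non-zero means at least one test failed.
        sys.exit(len(result.errors) + len(result.failures))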
diff --git a/numpy/doc/swig/numpy.i b/numpy/doc/swig/numpy.i deleted file mode 100644 index 72fc4f9c4..000000000 --- a/numpy/doc/swig/numpy.i +++ /dev/null @@ -1,1634 +0,0 @@ -/* -*- C -*- (not really, but good for syntax highlighting) */ -#ifdef SWIGPYTHON - -%{ -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -%} - -/**********************************************************************/ - -%fragment("NumPy_Backward_Compatibility", "header") -{ -/* Support older NumPy data type names -*/ -%#if NDARRAY_VERSION < 0x01000000 -%#define NPY_BOOL PyArray_BOOL -%#define NPY_BYTE PyArray_BYTE -%#define NPY_UBYTE PyArray_UBYTE -%#define NPY_SHORT PyArray_SHORT -%#define NPY_USHORT PyArray_USHORT -%#define NPY_INT PyArray_INT -%#define NPY_UINT PyArray_UINT -%#define NPY_LONG PyArray_LONG -%#define NPY_ULONG PyArray_ULONG -%#define NPY_LONGLONG PyArray_LONGLONG -%#define NPY_ULONGLONG PyArray_ULONGLONG -%#define NPY_FLOAT PyArray_FLOAT -%#define NPY_DOUBLE PyArray_DOUBLE -%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -%#define NPY_CFLOAT PyArray_CFLOAT -%#define NPY_CDOUBLE PyArray_CDOUBLE -%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -%#define NPY_OBJECT PyArray_OBJECT -%#define NPY_STRING PyArray_STRING -%#define NPY_UNICODE PyArray_UNICODE -%#define NPY_VOID PyArray_VOID -%#define NPY_NTYPES PyArray_NTYPES -%#define NPY_NOTYPE PyArray_NOTYPE -%#define NPY_CHAR PyArray_CHAR -%#define NPY_USERDEF PyArray_USERDEF -%#define npy_intp intp - -%#define NPY_MAX_BYTE MAX_BYTE -%#define NPY_MIN_BYTE MIN_BYTE -%#define NPY_MAX_UBYTE MAX_UBYTE -%#define NPY_MAX_SHORT MAX_SHORT -%#define NPY_MIN_SHORT MIN_SHORT -%#define NPY_MAX_USHORT MAX_USHORT -%#define NPY_MAX_INT MAX_INT -%#define NPY_MIN_INT MIN_INT -%#define NPY_MAX_UINT MAX_UINT -%#define NPY_MAX_LONG MAX_LONG -%#define NPY_MIN_LONG MIN_LONG -%#define NPY_MAX_ULONG MAX_ULONG -%#define NPY_MAX_LONGLONG MAX_LONGLONG -%#define NPY_MIN_LONGLONG MIN_LONGLONG -%#define NPY_MAX_ULONGLONG MAX_ULONGLONG -%#define NPY_MAX_INTP MAX_INTP -%#define NPY_MIN_INTP MIN_INTP - -%#define NPY_FARRAY FARRAY -%#define NPY_F_CONTIGUOUS F_CONTIGUOUS -%#endif -} - -/**********************************************************************/ - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it to fix some minor bugs, upgrade from Numeric to numpy (all - * versions), add some comments and functionality, and convert from - * direct code insertion to SWIG fragments. - */ - -%fragment("NumPy_Macros", "header") -{ -/* Macros to extract array attributes. - */ -%#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -%#define array_type(a) (int)(PyArray_TYPE(a)) -%#define array_numdims(a) (((PyArrayObject *)a)->nd) -%#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -%#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -%#define array_data(a) (((PyArrayObject *)a)->data) -%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -%#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) -%#define array_is_fortran(a) (PyArray_ISFORTRAN(a)) -} - -/**********************************************************************/ - -%fragment("NumPy_Utilities", "header") -{ - /* Given a PyObject, return a string describing its type. 
- */ - char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; - } - - /* Given a NumPy typecode, return a string describing the type. - */ - char* typecode_string(int typecode) { - static char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; - } - - /* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ - int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); - } -} - -/**********************************************************************/ - -%fragment("NumPy_Object_to_Array", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities") -{ - /* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. - */ - PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) - { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) - { - ary = (PyArrayObject*) input; - } - else if is_array(input) - { - char* desired_type = typecode_string(typecode); - char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else - { - char * desired_type = typecode_string(typecode); - char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; - } - - /* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ - PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) - { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) - { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else - { - py_obj = PyArray_FROMANY(input, typecode, 0, 0, NPY_DEFAULT); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; - } - - /* Given a PyArrayObject, check to see if it is contiguous. 
If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. - */ - PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) - { - PyArrayObject* result; - if (array_is_contiguous(ary)) - { - result = ary; - *is_new_object = 0; - } - else - { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; - } - - /* Given a PyArrayObject, check to see if it is Fortran-contiguous. - * If so, return the input pointer, but do not flag it as not a new - * object. If it is not Fortran-contiguous, create a new - * PyArrayObject using the original data, flag it as a new object - * and return the pointer. - */ - PyArrayObject* make_fortran(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) - { - PyArrayObject* result; - if (array_is_fortran(ary)) - { - result = ary; - *is_new_object = 0; - } - else - { - Py_INCREF(ary->descr); - result = (PyArrayObject*) PyArray_FromArray(ary, ary->descr, NPY_FORTRAN); - *is_new_object = 1; - } - return result; - } - - /* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ - PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) - { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, - &is_new1); - if (ary1) - { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) - { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; - } - - /* Convert a given PyObject to a Fortran-ordered PyArrayObject of the - * specified type. If the input object is not a Fortran-ordered - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ - PyArrayObject* obj_to_array_fortran_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) - { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, - &is_new1); - if (ary1) - { - ary2 = make_fortran(ary1, &is_new2, 0, 0); - if (is_new1 && is_new2) - { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; - } - -} /* end fragment */ - - -/**********************************************************************/ - -%fragment("NumPy_Array_Requirements", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros") -{ - /* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ - int require_contiguous(PyArrayObject* ary) - { - int contiguous = 1; - if (!array_is_contiguous(ary)) - { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; - } - - /* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. 
- */ - int require_native(PyArrayObject* ary) - { - int native = 1; - if (!array_is_native(ary)) - { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. " - "A byte-swapped array was given"); - native = 0; - } - return native; - } - - /* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. Otherwise, set the python error string and return 0. - */ - int require_dimensions(PyArrayObject* ary, int exact_dimensions) - { - int success = 1; - if (array_numdims(ary) != exact_dimensions) - { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; - } - - /* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. - */ - int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) - { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) - { - if (array_numdims(ary) == exact_dimensions[i]) - { - success = 1; - } - } - if (!success) - { - for (i = 0; i < n-1; i++) - { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; - } - - /* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. - */ - int require_size(PyArrayObject* ary, npy_intp* size, int n) - { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) - { - if (size[i] != -1 && size[i] != array_size(ary,i)) - { - success = 0; - } - } - if (!success) - { - for (i = 0; i < n; i++) - { - if (size[i] == -1) - { - sprintf(s, "*,"); - } - else - { - sprintf(s, "%ld,", (long int)size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) - { - sprintf(s, "%ld,", (long int)array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); - } - return success; - } - - /* Require the given PyArrayObject to to be FORTRAN ordered. If the - * the PyArrayObject is already FORTRAN ordered, do nothing. Else, - * set the FORTRAN ordering flag and recompute the strides. 
- */ - int require_fortran(PyArrayObject* ary) - { - int success = 1; - int nd = array_numdims(ary); - int i; - if (array_is_fortran(ary)) return success; - /* Set the FORTRAN ordered flag */ - ary->flags = NPY_FARRAY; - /* Recompute the strides */ - ary->strides[0] = ary->strides[nd-1]; - for (i=1; i < nd; ++i) - ary->strides[i] = ary->strides[i-1] * array_size(ary,i-1); - return success; - } -} - -/* Combine all NumPy fragments into one for convenience */ -%fragment("NumPy_Fragments", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities", - fragment="NumPy_Object_to_Array", - fragment="NumPy_Array_Requirements") { } - -/* End John Hunter translation (with modifications by Bill Spotz) - */ - -/* %numpy_typemaps() macro - * - * This macro defines a family of 41 typemaps that allow C arguments - * of the form - * - * (DATA_TYPE IN_ARRAY1[ANY]) - * (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - * - * (DATA_TYPE IN_ARRAY2[ANY][ANY]) - * (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - * (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - * - * (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - * (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) - * (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) - * - * (DATA_TYPE INPLACE_ARRAY1[ANY]) - * (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - * - * (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - * (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - * (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - * - * (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - * (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) - * (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) - * - * (DATA_TYPE ARGOUT_ARRAY1[ANY]) - * (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - * - * (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - * - * (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) - * (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) - * (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) - * (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) - * - * where "DATA_TYPE" is any type supported by the NumPy module, and - * "DIM_TYPE" is any int-like type suitable for specifying dimensions. 
- * The difference between "ARRAY" typemaps and "FARRAY" typemaps is - * that the "FARRAY" typemaps expect FORTRAN ordering of - * multidimensional arrays. In python, the dimensions will not need - * to be specified (except for the "DATA_TYPE* ARGOUT_ARRAY1" - * typemaps). The IN_ARRAYs can be a numpy array or any sequence that - * can be converted to a numpy array of the specified type. The - * INPLACE_ARRAYs must be numpy arrays of the appropriate type. The - * ARGOUT_ARRAYs will be returned as new numpy arrays of the - * appropriate type. - * - * These typemaps can be applied to existing functions using the - * %apply directive. For example: - * - * %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)}; - * double prod(double* series, int length); - * - * %apply (int DIM1, int DIM2, double* INPLACE_ARRAY2) - * {(int rows, int cols, double* matrix )}; - * void floor(int rows, int cols, double* matrix, double f); - * - * %apply (double IN_ARRAY3[ANY][ANY][ANY]) - * {(double tensor[2][2][2] )}; - * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) - * {(double low[2][2][2] )}; - * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) - * {(double upp[2][2][2] )}; - * void luSplit(double tensor[2][2][2], - * double low[2][2][2], - * double upp[2][2][2] ); - * - * or directly with - * - * double prod(double* IN_ARRAY1, int DIM1); - * - * void floor(int DIM1, int DIM2, double* INPLACE_ARRAY2, double f); - * - * void luSplit(double IN_ARRAY3[ANY][ANY][ANY], - * double ARGOUT_ARRAY3[ANY][ANY][ANY], - * double ARGOUT_ARRAY3[ANY][ANY][ANY]); - */ - -%define %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -/************************/ -/* Input Array Typemaps */ -/************************/ - -/* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY1[ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY1[ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = { $1_dim0 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY1[ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = { -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, 
DATA_TYPE* IN_ARRAY1) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = {-1}; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY2[ANY][ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY2[ANY][ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { $1_dim0, $1_dim1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY2[ANY][ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = 
obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} -%typemap(freearg) - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* IN_ARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - 
fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3) | !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} -%typemap(freearg) - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* IN_FARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/***************************/ -/* In-Place Array Typemaps */ -/***************************/ - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY1[ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY1[ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[1] = { $1_dim0 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_size(array, size, 1) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* 
Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - (PyArrayObject* array=NULL, int i=1) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = 1; - for (i=0; i < array_numdims(array); ++i) $2 *= array_size(array,i); -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - (PyArrayObject* array=NULL, int i=0) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = 1; - for (i=0; i < array_numdims(array); ++i) $1 *= array_size(array,i); - $2 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[2] = { $1_dim0, $1_dim1 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_size(array, size, 2) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) || - !require_native(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) 
array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) - || !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) || - !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_size(array, size, 3) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) || - !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* INPLACE_ARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) - 
(PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) || - !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* INPLACE_FARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) - || !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} - -/*************************/ -/* Argout Array Typemaps */ -/*************************/ - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY1[ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[1] = { $1_dim0 }; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY1[ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - */ -%typemap(in,numinputs=1, - fragment="NumPy_Fragments") - (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - (PyObject * array = NULL) -{ - npy_intp dims[1]; - if (!PyInt_Check($input)) - { - char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. 
'%s' given.", - typestring); - SWIG_fail; - } - $2 = (DIM_TYPE) PyInt_AsLong($input); - dims[0] = (npy_intp) $2; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); -} -%typemap(argout) - (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - */ -%typemap(in,numinputs=1, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - (PyObject * array = NULL) -{ - npy_intp dims[1]; - if (!PyInt_Check($input)) - { - char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", - typestring); - SWIG_fail; - } - $1 = (DIM_TYPE) PyInt_AsLong($input); - dims[0] = (npy_intp) $1; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $2 = (DATA_TYPE*) array_data(array); -} -%typemap(argout) - (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[2] = { $1_dim0, $1_dim1 }; - array = PyArray_SimpleNew(2, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = PyArray_SimpleNew(3, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/*****************************/ -/* Argoutview Array Typemaps */ -/*****************************/ - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 ) - (DATA_TYPE* data_temp , DIM_TYPE dim_temp) -{ - $1 = &data_temp; - $2 = &dim_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) -{ - npy_intp dims[1] = { *$2 }; - PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DATA_TYPE** ARGOUTVIEW_ARRAY1) - (DIM_TYPE dim_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim_temp; - $2 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) -{ - npy_intp dims[1] = { *$1 }; - PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) - (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE 
dim2_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) -{ - npy_intp dims[2] = { *$2, *$3 }; - PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_ARRAY2) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) -{ - npy_intp dims[2] = { *$1, *$2 }; - PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) - (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) -{ - npy_intp dims[2] = { *$2, *$3 }; - PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_FARRAY2) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) -{ - npy_intp dims[2] = { *$1, *$2 }; - PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[3] = { *$2, *$3, *$4 }; - PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, - DATA_TYPE** ARGOUTVIEW_ARRAY3) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, 
DIM_TYPE dim3_temp, DATA_TYPE* data_temp) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) -{ - npy_intp dims[3] = { *$1, *$2, *$3 }; - PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$3)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[3] = { *$2, *$3, *$4 }; - PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, - DATA_TYPE** ARGOUTVIEW_FARRAY3) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) -{ - npy_intp dims[3] = { *$1, *$2, *$3 }; - PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$3)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -%enddef /* %numpy_typemaps() macro */ -/* *************************************************************** */ - -/* Concrete instances of the %numpy_typemaps() macro: Each invocation - * below applies all of the typemaps above to the specified data type. - */ -%numpy_typemaps(signed char , NPY_BYTE , int) -%numpy_typemaps(unsigned char , NPY_UBYTE , int) -%numpy_typemaps(short , NPY_SHORT , int) -%numpy_typemaps(unsigned short , NPY_USHORT , int) -%numpy_typemaps(int , NPY_INT , int) -%numpy_typemaps(unsigned int , NPY_UINT , int) -%numpy_typemaps(long , NPY_LONG , int) -%numpy_typemaps(unsigned long , NPY_ULONG , int) -%numpy_typemaps(long long , NPY_LONGLONG , int) -%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int) -%numpy_typemaps(float , NPY_FLOAT , int) -%numpy_typemaps(double , NPY_DOUBLE , int) - -/* *************************************************************** - * The follow macro expansion does not work, because C++ bool is 4 - * bytes and NPY_BOOL is 1 byte - * - * %numpy_typemaps(bool, NPY_BOOL, int) - */ - -/* *************************************************************** - * On my Mac, I get the following warning for this macro expansion: - * 'swig/python detected a memory leak of type 'long double *', no destructor found.' 
- * - * %numpy_typemaps(long double, NPY_LONGDOUBLE, int) - */ - -/* *************************************************************** - * Swig complains about a syntax error for the following macro - * expansions: - * - * %numpy_typemaps(complex float, NPY_CFLOAT , int) - * - * %numpy_typemaps(complex double, NPY_CDOUBLE, int) - * - * %numpy_typemaps(complex long double, NPY_CLONGDOUBLE, int) - */ - -#endif /* SWIGPYTHON */ diff --git a/numpy/doc/swig/pyfragments.swg b/numpy/doc/swig/pyfragments.swg deleted file mode 100644 index 0deaa61e1..000000000 --- a/numpy/doc/swig/pyfragments.swg +++ /dev/null @@ -1,174 +0,0 @@ -/*-*- C -*-*/ - -/**********************************************************************/ - -/* For numpy versions prior to 1.0, the names of certain data types - * are different than in later versions. This fragment provides macro - * substitutions that allow us to support old and new versions of - * numpy. - */ - -%fragment("NumPy_Backward_Compatibility", "header") -{ -/* Support older NumPy data type names - */ -%#if NDARRAY_VERSION < 0x01000000 -%#define NPY_BOOL PyArray_BOOL -%#define NPY_BYTE PyArray_BYTE -%#define NPY_UBYTE PyArray_UBYTE -%#define NPY_SHORT PyArray_SHORT -%#define NPY_USHORT PyArray_USHORT -%#define NPY_INT PyArray_INT -%#define NPY_UINT PyArray_UINT -%#define NPY_LONG PyArray_LONG -%#define NPY_ULONG PyArray_ULONG -%#define NPY_LONGLONG PyArray_LONGLONG -%#define NPY_ULONGLONG PyArray_ULONGLONG -%#define NPY_FLOAT PyArray_FLOAT -%#define NPY_DOUBLE PyArray_DOUBLE -%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -%#define NPY_CFLOAT PyArray_CFLOAT -%#define NPY_CDOUBLE PyArray_CDOUBLE -%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -%#define NPY_OBJECT PyArray_OBJECT -%#define NPY_STRING PyArray_STRING -%#define NPY_UNICODE PyArray_UNICODE -%#define NPY_VOID PyArray_VOID -%#define NPY_NTYPES PyArray_NTYPES -%#define NPY_NOTYPE PyArray_NOTYPE -%#define NPY_CHAR PyArray_CHAR -%#define NPY_USERDEF PyArray_USERDEF -%#define npy_intp intp - -%#define NPY_MAX_BYTE MAX_BYTE -%#define NPY_MIN_BYTE MIN_BYTE -%#define NPY_MAX_UBYTE MAX_UBYTE -%#define NPY_MAX_SHORT MAX_SHORT -%#define NPY_MIN_SHORT MIN_SHORT -%#define NPY_MAX_USHORT MAX_USHORT -%#define NPY_MAX_INT MAX_INT -%#define NPY_MIN_INT MIN_INT -%#define NPY_MAX_UINT MAX_UINT -%#define NPY_MAX_LONG MAX_LONG -%#define NPY_MIN_LONG MIN_LONG -%#define NPY_MAX_ULONG MAX_ULONG -%#define NPY_MAX_LONGLONG MAX_LONGLONG -%#define NPY_MIN_LONGLONG MIN_LONGLONG -%#define NPY_MAX_ULONGLONG MAX_ULONGLONG -%#define NPY_MAX_INTP MAX_INTP -%#define NPY_MIN_INTP MIN_INTP - -%#define NPY_FARRAY FARRAY -%#define NPY_F_CONTIGUOUS F_CONTIGUOUS -%#endif -} - -/**********************************************************************/ - -/* Override the SWIG_AsVal_frag(long) fragment so that it also checks - * for numpy scalar array types. 
The code through the %#endif is - * essentially cut-and-paste from pyprimtype.swg - */ - -%fragment(SWIG_AsVal_frag(long), "header", - fragment="SWIG_CanCastAsInteger", - fragment="NumPy_Backward_Compatibility") -{ - SWIGINTERN int - SWIG_AsVal_dec(long)(PyObject * obj, long * val) - { - static PyArray_Descr * longDescr = PyArray_DescrNewFromType(NPY_LONG); - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -%#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -%#endif - if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; - PyArray_CastScalarToCtype(obj, (void*)val, longDescr); - return SWIG_OK; - } -} - - -/* Override the SWIG_AsVal_frag(unsigned long) fragment so that it - * also checks for numpy scalar array types. The code through the - * %#endif is essentially cut-and-paste from pyprimtype.swg - */ - -%fragment(SWIG_AsVal_frag(unsigned long),"header", - fragment="SWIG_CanCastAsInteger", - fragment="NumPy_Backward_Compatibility") -{ - SWIGINTERN int - SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val) - { - static PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG); - if (PyInt_Check(obj)) { - long v = PyInt_AsLong(obj); - if (v >= 0) { - if (val) *val = v; - return SWIG_OK; - } else { - return SWIG_OverflowError; - } - } else if (PyLong_Check(obj)) { - unsigned long v = PyLong_AsUnsignedLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -%#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - unsigned long v = PyLong_AsUnsignedLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, 0, ULONG_MAX)) { - if (val) *val = (unsigned long)(d); - return res; - } - } - } -%#endif - if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; - PyArray_CastScalarToCtype(obj, (void*)val, ulongDescr); - return SWIG_OK; - } -} diff --git a/numpy/doc/swig/test/Array.i b/numpy/doc/swig/test/Array.i deleted file mode 100644 index d56dd2d1c..000000000 --- a/numpy/doc/swig/test/Array.i +++ /dev/null @@ -1,107 +0,0 @@ -// -*- c++ -*- - -%module Array - -%{ -#define SWIG_FILE_WITH_INIT -#include "Array1.h" -#include "Array2.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - - // Get the STL typemaps -%include "stl.i" - -// Handle standard exceptions -%include "exception.i" -%exception -{ - try - { - $action - } - catch (const std::invalid_argument& e) - { - SWIG_exception(SWIG_ValueError, e.what()); - } - catch (const std::out_of_range& e) - { - SWIG_exception(SWIG_IndexError, e.what()); - } -} -%init %{ - import_array(); -%} - -// Global ignores -%ignore *::operator=; -%ignore *::operator[]; - -// Apply the 1D NumPy typemaps -%apply (int DIM1 , long* INPLACE_ARRAY1) - {(int length, long* data )}; -%apply (long** ARGOUTVIEW_ARRAY1, int* DIM1 ) - {(long** data , 
int* length)}; - -// Apply the 2D NumPy typemaps -%apply (int DIM1 , int DIM2 , long* INPLACE_ARRAY2) - {(int nrows, int ncols, long* data )}; -%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_ARRAY2) - {(int* nrows, int* ncols, long** data )}; -// Note: the %apply for INPLACE_ARRAY2 above gets successfully applied -// to the constructor Array2(int nrows, int ncols, long* data), but -// does not get applied to the method Array2::resize(int nrows, int -// ncols, long* data). I have no idea why. For this reason the test -// for Apply2.resize(numpy.ndarray) in testArray.py is commented out. - -// Array1 support -%include "Array1.h" -%extend Array1 -{ - void __setitem__(int i, long v) - { - self->operator[](i) = v; - } - - long __getitem__(int i) - { - return self->operator[](i); - } - - int __len__() - { - return self->length(); - } - - std::string __str__() - { - return self->asString(); - } -} - -// Array2 support -%include "Array2.h" -%extend Array2 -{ - void __setitem__(int i, Array1 & v) - { - self->operator[](i) = v; - } - - Array1 & __getitem__(int i) - { - return self->operator[](i); - } - - int __len__() - { - return self->nrows() * self->ncols(); - } - - std::string __str__() - { - return self->asString(); - } -} diff --git a/numpy/doc/swig/test/Array1.cxx b/numpy/doc/swig/test/Array1.cxx deleted file mode 100644 index 0c09e02f9..000000000 --- a/numpy/doc/swig/test/Array1.cxx +++ /dev/null @@ -1,131 +0,0 @@ -#include "Array1.h" -#include -#include - -// Default/length/array constructor -Array1::Array1(int length, long* data) : - _ownData(false), _length(0), _buffer(0) -{ - resize(length, data); -} - -// Copy constructor -Array1::Array1(const Array1 & source) : - _length(source._length) -{ - allocateMemory(); - *this = source; -} - -// Destructor -Array1::~Array1() -{ - deallocateMemory(); -} - -// Assignment operator -Array1 & Array1::operator=(const Array1 & source) -{ - int len = _length < source._length ? 
_length : source._length; - for (int i=0; i < len; ++i) - { - (*this)[i] = source[i]; - } - return *this; -} - -// Equals operator -bool Array1::operator==(const Array1 & other) const -{ - if (_length != other._length) return false; - for (int i=0; i < _length; ++i) - { - if ((*this)[i] != other[i]) return false; - } - return true; -} - -// Length accessor -int Array1::length() const -{ - return _length; -} - -// Resize array -void Array1::resize(int length, long* data) -{ - if (length < 0) throw std::invalid_argument("Array1 length less than 0"); - if (length == _length) return; - deallocateMemory(); - _length = length; - if (!data) - { - allocateMemory(); - } - else - { - _ownData = false; - _buffer = data; - } -} - -// Set item accessor -long & Array1::operator[](int i) -{ - if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); - return _buffer[i]; -} - -// Get item accessor -const long & Array1::operator[](int i) const -{ - if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); - return _buffer[i]; -} - -// String output -std::string Array1::asString() const -{ - std::stringstream result; - result << "["; - for (int i=0; i < _length; ++i) - { - result << " " << _buffer[i]; - if (i < _length-1) result << ","; - } - result << " ]"; - return result.str(); -} - -// Get view -void Array1::view(long** data, int* length) const -{ - *data = _buffer; - *length = _length; -} - -// Private methods - void Array1::allocateMemory() - { - if (_length == 0) - { - _ownData = false; - _buffer = 0; - } - else - { - _ownData = true; - _buffer = new long[_length]; - } - } - - void Array1::deallocateMemory() - { - if (_ownData && _length && _buffer) - { - delete [] _buffer; - } - _ownData = false; - _length = 0; - _buffer = 0; - } diff --git a/numpy/doc/swig/test/Array1.h b/numpy/doc/swig/test/Array1.h deleted file mode 100644 index 754c248fc..000000000 --- a/numpy/doc/swig/test/Array1.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef ARRAY1_H -#define ARRAY1_H - -#include -#include - -class Array1 -{ -public: - - // Default/length/array constructor - Array1(int length = 0, long* data = 0); - - // Copy constructor - Array1(const Array1 & source); - - // Destructor - ~Array1(); - - // Assignment operator - Array1 & operator=(const Array1 & source); - - // Equals operator - bool operator==(const Array1 & other) const; - - // Length accessor - int length() const; - - // Resize array - void resize(int length, long* data = 0); - - // Set item accessor - long & operator[](int i); - - // Get item accessor - const long & operator[](int i) const; - - // String output - std::string asString() const; - - // Get view - void view(long** data, int* length) const; - -private: - // Members - bool _ownData; - int _length; - long * _buffer; - - // Methods - void allocateMemory(); - void deallocateMemory(); -}; - -#endif diff --git a/numpy/doc/swig/test/Array2.cxx b/numpy/doc/swig/test/Array2.cxx deleted file mode 100644 index e3558f786..000000000 --- a/numpy/doc/swig/test/Array2.cxx +++ /dev/null @@ -1,168 +0,0 @@ -#include "Array2.h" -#include - -// Default constructor -Array2::Array2() : - _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) -{ } - -// Size/array constructor -Array2::Array2(int nrows, int ncols, long* data) : - _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) -{ - resize(nrows, ncols, data); -} - -// Copy constructor -Array2::Array2(const Array2 & source) : - _nrows(source._nrows), _ncols(source._ncols) -{ - _ownData = true; - allocateMemory(); - 
*this = source; -} - -// Destructor -Array2::~Array2() -{ - deallocateMemory(); -} - -// Assignment operator -Array2 & Array2::operator=(const Array2 & source) -{ - int nrows = _nrows < source._nrows ? _nrows : source._nrows; - int ncols = _ncols < source._ncols ? _ncols : source._ncols; - for (int i=0; i < nrows; ++i) - { - for (int j=0; j < ncols; ++j) - { - (*this)[i][j] = source[i][j]; - } - } - return *this; -} - -// Equals operator -bool Array2::operator==(const Array2 & other) const -{ - if (_nrows != other._nrows) return false; - if (_ncols != other._ncols) return false; - for (int i=0; i < _nrows; ++i) - { - for (int j=0; j < _ncols; ++j) - { - if ((*this)[i][j] != other[i][j]) return false; - } - } - return true; -} - -// Length accessors -int Array2::nrows() const -{ - return _nrows; -} - -int Array2::ncols() const -{ - return _ncols; -} - -// Resize array -void Array2::resize(int nrows, int ncols, long* data) -{ - if (nrows < 0) throw std::invalid_argument("Array2 nrows less than 0"); - if (ncols < 0) throw std::invalid_argument("Array2 ncols less than 0"); - if (nrows == _nrows && ncols == _ncols) return; - deallocateMemory(); - _nrows = nrows; - _ncols = ncols; - if (!data) - { - allocateMemory(); - } - else - { - _ownData = false; - _buffer = data; - allocateRows(); - } -} - -// Set item accessor -Array1 & Array2::operator[](int i) -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); - return _rows[i]; -} - -// Get item accessor -const Array1 & Array2::operator[](int i) const -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); - return _rows[i]; -} - -// String output -std::string Array2::asString() const -{ - std::stringstream result; - result << "[ "; - for (int i=0; i < _nrows; ++i) - { - if (i > 0) result << " "; - result << (*this)[i].asString(); - if (i < _nrows-1) result << "," << std::endl; - } - result << " ]" << std::endl; - return result.str(); -} - -// Get view -void Array2::view(int* nrows, int* ncols, long** data) const -{ - *nrows = _nrows; - *ncols = _ncols; - *data = _buffer; -} - -// Private methods -void Array2::allocateMemory() -{ - if (_nrows * _ncols == 0) - { - _ownData = false; - _buffer = 0; - _rows = 0; - } - else - { - _ownData = true; - _buffer = new long[_nrows*_ncols]; - allocateRows(); - } -} - -void Array2::allocateRows() -{ - _rows = new Array1[_nrows]; - for (int i=0; i < _nrows; ++i) - { - _rows[i].resize(_ncols, &_buffer[i*_ncols]); - } -} - -void Array2::deallocateMemory() -{ - if (_ownData && _nrows*_ncols && _buffer) - { - delete [] _rows; - delete [] _buffer; - } - _ownData = false; - _nrows = 0; - _ncols = 0; - _buffer = 0; - _rows = 0; -} diff --git a/numpy/doc/swig/test/Array2.h b/numpy/doc/swig/test/Array2.h deleted file mode 100644 index a6e5bfc30..000000000 --- a/numpy/doc/swig/test/Array2.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef ARRAY2_H -#define ARRAY2_H - -#include "Array1.h" -#include -#include - -class Array2 -{ -public: - - // Default constructor - Array2(); - - // Size/array constructor - Array2(int nrows, int ncols, long* data=0); - - // Copy constructor - Array2(const Array2 & source); - - // Destructor - ~Array2(); - - // Assignment operator - Array2 & operator=(const Array2 & source); - - // Equals operator - bool operator==(const Array2 & other) const; - - // Length accessors - int nrows() const; - int ncols() const; - - // Resize array - void resize(int ncols, int nrows, long* data=0); - - // Set item accessor - Array1 & operator[](int i); - - // 
Get item accessor - const Array1 & operator[](int i) const; - - // String output - std::string asString() const; - - // Get view - void view(int* nrows, int* ncols, long** data) const; - -private: - // Members - bool _ownData; - int _nrows; - int _ncols; - long * _buffer; - Array1 * _rows; - - // Methods - void allocateMemory(); - void allocateRows(); - void deallocateMemory(); -}; - -#endif diff --git a/numpy/doc/swig/test/Farray.cxx b/numpy/doc/swig/test/Farray.cxx deleted file mode 100644 index 3983c333b..000000000 --- a/numpy/doc/swig/test/Farray.cxx +++ /dev/null @@ -1,122 +0,0 @@ -#include "Farray.h" -#include - -// Size constructor -Farray::Farray(int nrows, int ncols) : - _nrows(nrows), _ncols(ncols), _buffer(0) -{ - allocateMemory(); -} - -// Copy constructor -Farray::Farray(const Farray & source) : - _nrows(source._nrows), _ncols(source._ncols) -{ - allocateMemory(); - *this = source; -} - -// Destructor -Farray::~Farray() -{ - delete [] _buffer; -} - -// Assignment operator -Farray & Farray::operator=(const Farray & source) -{ - int nrows = _nrows < source._nrows ? _nrows : source._nrows; - int ncols = _ncols < source._ncols ? _ncols : source._ncols; - for (int i=0; i < nrows; ++i) - { - for (int j=0; j < ncols; ++j) - { - (*this)(i,j) = source(i,j); - } - } - return *this; -} - -// Equals operator -bool Farray::operator==(const Farray & other) const -{ - if (_nrows != other._nrows) return false; - if (_ncols != other._ncols) return false; - for (int i=0; i < _nrows; ++i) - { - for (int j=0; j < _ncols; ++j) - { - if ((*this)(i,j) != other(i,j)) return false; - } - } - return true; -} - -// Length accessors -int Farray::nrows() const -{ - return _nrows; -} - -int Farray::ncols() const -{ - return _ncols; -} - -// Set item accessor -long & Farray::operator()(int i, int j) -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); - if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); - return _buffer[offset(i,j)]; -} - -// Get item accessor -const long & Farray::operator()(int i, int j) const -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); - if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); - return _buffer[offset(i,j)]; -} - -// String output -std::string Farray::asString() const -{ - std::stringstream result; - result << "[ "; - for (int i=0; i < _nrows; ++i) - { - if (i > 0) result << " "; - result << "["; - for (int j=0; j < _ncols; ++j) - { - result << " " << (*this)(i,j); - if (j < _ncols-1) result << ","; - } - result << " ]"; - if (i < _nrows-1) result << "," << std::endl; - } - result << " ]" << std::endl; - return result.str(); -} - -// Get view -void Farray::view(int* nrows, int* ncols, long** data) const -{ - *nrows = _nrows; - *ncols = _ncols; - *data = _buffer; -} - -// Private methods -void Farray::allocateMemory() -{ - if (_nrows <= 0) throw std::invalid_argument("Farray nrows <= 0"); - if (_ncols <= 0) throw std::invalid_argument("Farray ncols <= 0"); - _buffer = new long[_nrows*_ncols]; -} - -inline int Farray::offset(int i, int j) const -{ - return i + j * _nrows; -} diff --git a/numpy/doc/swig/test/Farray.h b/numpy/doc/swig/test/Farray.h deleted file mode 100644 index 4199a287c..000000000 --- a/numpy/doc/swig/test/Farray.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef FARRAY_H -#define FARRAY_H - -#include -#include - -class Farray -{ -public: - - // Size constructor - Farray(int nrows, int ncols); - - // Copy constructor - Farray(const 
Farray & source); - - // Destructor - ~Farray(); - - // Assignment operator - Farray & operator=(const Farray & source); - - // Equals operator - bool operator==(const Farray & other) const; - - // Length accessors - int nrows() const; - int ncols() const; - - // Set item accessor - long & operator()(int i, int j); - - // Get item accessor - const long & operator()(int i, int j) const; - - // String output - std::string asString() const; - - // Get view - void view(int* nrows, int* ncols, long** data) const; - -private: - // Members - int _nrows; - int _ncols; - long * _buffer; - - // Default constructor: not implemented - Farray(); - - // Methods - void allocateMemory(); - int offset(int i, int j) const; -}; - -#endif diff --git a/numpy/doc/swig/test/Farray.i b/numpy/doc/swig/test/Farray.i deleted file mode 100644 index 25f6cd025..000000000 --- a/numpy/doc/swig/test/Farray.i +++ /dev/null @@ -1,73 +0,0 @@ -// -*- c++ -*- - -%module Farray - -%{ -#define SWIG_FILE_WITH_INIT -#include "Farray.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - - // Get the STL typemaps -%include "stl.i" - -// Handle standard exceptions -%include "exception.i" -%exception -{ - try - { - $action - } - catch (const std::invalid_argument& e) - { - SWIG_exception(SWIG_ValueError, e.what()); - } - catch (const std::out_of_range& e) - { - SWIG_exception(SWIG_IndexError, e.what()); - } -} -%init %{ - import_array(); -%} - -// Global ignores -%ignore *::operator=; -%ignore *::operator(); - -// Apply the 2D NumPy typemaps -%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_FARRAY2) - {(int* nrows, int* ncols, long** data )}; - -// Farray support -%include "Farray.h" -%extend Farray -{ - PyObject * __setitem__(PyObject* index, long v) - { - int i, j; - if (!PyArg_ParseTuple(index, "ii:Farray___setitem__",&i,&j)) return NULL; - self->operator()(i,j) = v; - return Py_BuildValue(""); - } - - PyObject * __getitem__(PyObject * index) - { - int i, j; - if (!PyArg_ParseTuple(index, "ii:Farray___getitem__",&i,&j)) return NULL; - return SWIG_From_long(self->operator()(i,j)); - } - - int __len__() - { - return self->nrows() * self->ncols(); - } - - std::string __str__() - { - return self->asString(); - } -} diff --git a/numpy/doc/swig/test/Fortran.cxx b/numpy/doc/swig/test/Fortran.cxx deleted file mode 100644 index 475d21ddc..000000000 --- a/numpy/doc/swig/test/Fortran.cxx +++ /dev/null @@ -1,24 +0,0 @@ -#include -#include -#include -#include "Fortran.h" - -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## SecondElement(TYPE * matrix, int rows, int cols) { \ - TYPE result = matrix[1]; \ - return result; \ -} \ - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy/doc/swig/test/Fortran.h b/numpy/doc/swig/test/Fortran.h deleted file mode 100644 index c243bb50f..000000000 --- a/numpy/doc/swig/test/Fortran.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef FORTRAN_H -#define FORTRAN_H - -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## SecondElement( TYPE * matrix, int rows, int cols); \ - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , 
int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy/doc/swig/test/Fortran.i b/numpy/doc/swig/test/Fortran.i deleted file mode 100644 index 131790dd6..000000000 --- a/numpy/doc/swig/test/Fortran.i +++ /dev/null @@ -1,36 +0,0 @@ -// -*- c++ -*- -%module Fortran - -%{ -#define SWIG_FILE_WITH_INIT -#include "Fortran.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE* IN_FARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Fortran.h" diff --git a/numpy/doc/swig/test/Makefile b/numpy/doc/swig/test/Makefile deleted file mode 100644 index 5360b1ced..000000000 --- a/numpy/doc/swig/test/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# SWIG -INTERFACES = Array.i Farray.i Vector.i Matrix.i Tensor.i Fortran.i -WRAPPERS = $(INTERFACES:.i=_wrap.cxx) -PROXIES = $(INTERFACES:.i=.py ) - -# Default target: build the tests -.PHONY : all -all: $(WRAPPERS) Array1.cxx Array1.h Farray.cxx Farray.h Vector.cxx Vector.h \ - Matrix.cxx Matrix.h Tensor.cxx Tensor.h Fortran.h Fortran.cxx - ./setup.py build_ext -i - -# Test target: run the tests -.PHONY : test -test: all - python testVector.py - python testMatrix.py - python testTensor.py - python testArray.py - python testFarray.py - python testFortran.py - -# Rule: %.i -> %_wrap.cxx -%_wrap.cxx: %.i %.h ../numpy.i - swig -c++ -python $< -%_wrap.cxx: %.i %1.h %2.h ../numpy.i - swig -c++ -python $< - -# Clean target -.PHONY : clean -clean: - $(RM) -r build - $(RM) *.so - $(RM) $(WRAPPERS) - $(RM) $(PROXIES) diff --git a/numpy/doc/swig/test/Matrix.cxx b/numpy/doc/swig/test/Matrix.cxx deleted file mode 100644 index b953d7017..000000000 --- a/numpy/doc/swig/test/Matrix.cxx +++ /dev/null @@ -1,112 +0,0 @@ -#include -#include -#include -#include "Matrix.h" - -// The following macro defines a family of functions that work with 2D -// arrays with the forms -// -// TYPE SNAMEDet( TYPE matrix[2][2]); -// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); -// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); -// void SNAMEScale( TYPE matrix[3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); -// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil); -// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. 
The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 2D input arrays, hard-coded length -// * 2D input arrays -// * 2D input arrays, data last -// * 2D in-place arrays, hard-coded lengths -// * 2D in-place arrays -// * 2D in-place arrays, data last -// * 2D argout arrays, hard-coded length -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Det(TYPE matrix[2][2]) { \ - return matrix[0][0]*matrix[1][1] - matrix[0][1]*matrix[1][0]; \ -} \ -\ -TYPE SNAME ## Max(TYPE * matrix, int rows, int cols) { \ - int i, j, index; \ - TYPE result = matrix[0]; \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = j*rows + i; \ - if (matrix[index] > result) result = matrix[index]; \ - } \ - } \ - return result; \ -} \ -\ -TYPE SNAME ## Min(int rows, int cols, TYPE * matrix) { \ - int i, j, index; \ - TYPE result = matrix[0]; \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = j*rows + i; \ - if (matrix[index] < result) result = matrix[index]; \ - } \ - } \ - return result; \ -} \ -\ -void SNAME ## Scale(TYPE array[3][3], TYPE val) { \ - for (int i=0; i<3; ++i) \ - for (int j=0; j<3; ++j) \ - array[i][j] *= val; \ -} \ -\ -void SNAME ## Floor(TYPE * array, int rows, int cols, TYPE floor) { \ - int i, j, index; \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = j*rows + i; \ - if (array[index] < floor) array[index] = floor; \ - } \ - } \ -} \ -\ -void SNAME ## Ceil(int rows, int cols, TYPE * array, TYPE ceil) { \ - int i, j, index; \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = j*rows + i; \ - if (array[index] > ceil) array[index] = ceil; \ - } \ - } \ -} \ -\ -void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]) { \ - for (int i=0; i<3; ++i) { \ - for (int j=0; j<3; ++j) { \ - if (i >= j) { \ - lower[i][j] = matrix[i][j]; \ - upper[i][j] = 0; \ - } else { \ - lower[i][j] = 0; \ - upper[i][j] = matrix[i][j]; \ - } \ - } \ - } \ -} - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy/doc/swig/test/Matrix.h b/numpy/doc/swig/test/Matrix.h deleted file mode 100644 index f37836cc4..000000000 --- a/numpy/doc/swig/test/Matrix.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef MATRIX_H -#define MATRIX_H - -// The following macro defines the prototypes for a family of -// functions that work with 2D arrays with the forms -// -// TYPE SNAMEDet( TYPE matrix[2][2]); -// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); -// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); -// void SNAMEScale( TYPE array[3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); -// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil ); -// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. 
The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 2D input arrays, hard-coded lengths -// * 2D input arrays -// * 2D input arrays, data last -// * 2D in-place arrays, hard-coded lengths -// * 2D in-place arrays -// * 2D in-place arrays, data last -// * 2D argout arrays, hard-coded length -// -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## Det( TYPE matrix[2][2]); \ -TYPE SNAME ## Max( TYPE * matrix, int rows, int cols); \ -TYPE SNAME ## Min( int rows, int cols, TYPE * matrix); \ -void SNAME ## Scale( TYPE array[3][3], TYPE val); \ -void SNAME ## Floor( TYPE * array, int rows, int cols, TYPE floor); \ -void SNAME ## Ceil( int rows, int cols, TYPE * array, TYPE ceil ); \ -void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]); - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy/doc/swig/test/Matrix.i b/numpy/doc/swig/test/Matrix.i deleted file mode 100644 index e721397a0..000000000 --- a/numpy/doc/swig/test/Matrix.i +++ /dev/null @@ -1,45 +0,0 @@ -// -*- c++ -*- -%module Matrix - -%{ -#define SWIG_FILE_WITH_INIT -#include "Matrix.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE IN_ARRAY2[ANY][ANY]) {(TYPE matrix[ANY][ANY])}; -%apply (TYPE* IN_ARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; -%apply (int DIM1, int DIM2, TYPE* IN_ARRAY2) {(int rows, int cols, TYPE* matrix)}; - -%apply (TYPE INPLACE_ARRAY2[ANY][ANY]) {(TYPE array[3][3])}; -%apply (TYPE* INPLACE_ARRAY2, int DIM1, int DIM2) {(TYPE* array, int rows, int cols)}; -%apply (int DIM1, int DIM2, TYPE* INPLACE_ARRAY2) {(int rows, int cols, TYPE* array)}; - -%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE lower[3][3])}; -%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE upper[3][3])}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Matrix.h" diff --git a/numpy/doc/swig/test/Tensor.cxx b/numpy/doc/swig/test/Tensor.cxx deleted file mode 100644 index dce595291..000000000 --- a/numpy/doc/swig/test/Tensor.cxx +++ /dev/null @@ -1,131 +0,0 @@ -#include -#include -#include -#include "Tensor.h" - -// The following macro defines a family of functions that work with 3D -// arrays with the forms -// -// TYPE SNAMENorm( TYPE tensor[2][2][2]); -// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); -// TYPE SNAMEMin( int rows, int cols, int num, TYPE * tensor); -// void SNAMEScale( TYPE tensor[3][3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); -// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil); -// 
void SNAMELUSplit(TYPE in[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 3D input arrays, hard-coded length -// * 3D input arrays -// * 3D input arrays, data last -// * 3D in-place arrays, hard-coded lengths -// * 3D in-place arrays -// * 3D in-place arrays, data last -// * 3D argout arrays, hard-coded length -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Norm(TYPE tensor[2][2][2]) { \ - double result = 0; \ - for (int k=0; k<2; ++k) \ - for (int j=0; j<2; ++j) \ - for (int i=0; i<2; ++i) \ - result += tensor[i][j][k] * tensor[i][j][k]; \ - return (TYPE)sqrt(result/8); \ -} \ -\ -TYPE SNAME ## Max(TYPE * tensor, int rows, int cols, int num) { \ - int i, j, k, index; \ - TYPE result = tensor[0]; \ - for (k=0; k<num; ++k) { \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = k*rows*cols + j*rows + i; \ - if (tensor[index] > result) result = tensor[index]; \ - } \ - } \ - } \ - return result; \ -} \ -\ -TYPE SNAME ## Min(int rows, int cols, int num, TYPE * tensor) { \ - int i, j, k, index; \ - TYPE result = tensor[0]; \ - for (k=0; k<num; ++k) { \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = k*rows*cols + j*rows + i; \ - if (tensor[index] < result) result = tensor[index]; \ - } \ - } \ - } \ - return result; \ -} \ -\ -void SNAME ## Scale(TYPE array[3][3][3], TYPE val) { \ - for (int i=0; i<3; ++i) \ - for (int j=0; j<3; ++j) \ - for (int k=0; k<3; ++k) \ - array[i][j][k] *= val; \ -} \ -\ -void SNAME ## Floor(TYPE * array, int rows, int cols, int num, TYPE floor) { \ - int i, j, k, index; \ - for (k=0; k<num; ++k) { \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = k*rows*cols + j*rows + i; \ - if (array[index] < floor) array[index] = floor; \ - } \ - } \ - } \ -} \ -\ -void SNAME ## Ceil(int rows, int cols, int num, TYPE * array, TYPE ceil) { \ - int i, j, k, index; \ - for (k=0; k<num; ++k) { \ - for (j=0; j<cols; ++j) { \ - for (i=0; i<rows; ++i) { \ - index = k*rows*cols + j*rows + i; \ - if (array[index] > ceil) array[index] = ceil; \ - } \ - } \ - } \ -} \ -\ -void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], \ - TYPE upper[2][2][2]) { \ - int sum; \ - for (int k=0; k<2; ++k) { \ - for (int j=0; j<2; ++j) { \ - for (int i=0; i<2; ++i) { \ - sum = i + j + k; \ - if (sum < 2) { \ - lower[i][j][k] = tensor[i][j][k]; \ - upper[i][j][k] = 0; \ - } else { \ - upper[i][j][k] = tensor[i][j][k]; \ - lower[i][j][k] = 0; \ - } \ - } \ - } \ - } \ -} - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy/doc/swig/test/Tensor.h b/numpy/doc/swig/test/Tensor.h deleted file mode 100644 index d60eb2d2e..000000000 --- a/numpy/doc/swig/test/Tensor.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef TENSOR_H -#define TENSOR_H - -// The following macro defines the prototypes for a family of -// functions that work with 3D arrays with the forms -// -// TYPE SNAMENorm( TYPE tensor[2][2][2]); -// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); -// TYPE SNAMEMin( int rows, int cols, int num, TYPE * tensor); -// void SNAMEScale( TYPE array[3][3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); -// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil ); -// void SNAMELUSplit(TYPE in[3][3][3], TYPE lower[3][3][3], TYPE upper[3][3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. 
The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 3D input arrays, hard-coded lengths -// * 3D input arrays -// * 3D input arrays, data last -// * 3D in-place arrays, hard-coded lengths -// * 3D in-place arrays -// * 3D in-place arrays, data last -// * 3D argout arrays, hard-coded length -// -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## Norm( TYPE tensor[2][2][2]); \ -TYPE SNAME ## Max( TYPE * tensor, int rows, int cols, int num); \ -TYPE SNAME ## Min( int rows, int cols, int num, TYPE * tensor); \ -void SNAME ## Scale( TYPE array[3][3][3], TYPE val); \ -void SNAME ## Floor( TYPE * array, int rows, int cols, int num, TYPE floor); \ -void SNAME ## Ceil( int rows, int cols, int num, TYPE * array, TYPE ceil ); \ -void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy/doc/swig/test/Tensor.i b/numpy/doc/swig/test/Tensor.i deleted file mode 100644 index a1198dc9e..000000000 --- a/numpy/doc/swig/test/Tensor.i +++ /dev/null @@ -1,49 +0,0 @@ -// -*- c++ -*- -%module Tensor - -%{ -#define SWIG_FILE_WITH_INIT -#include "Tensor.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE IN_ARRAY3[ANY][ANY][ANY]) {(TYPE tensor[ANY][ANY][ANY])}; -%apply (TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3) - {(TYPE* tensor, int rows, int cols, int num)}; -%apply (int DIM1, int DIM2, int DIM3, TYPE* IN_ARRAY3) - {(int rows, int cols, int num, TYPE* tensor)}; - -%apply (TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) {(TYPE array[3][3][3])}; -%apply (TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) - {(TYPE* array, int rows, int cols, int num)}; -%apply (int DIM1, int DIM2, int DIM3, TYPE* INPLACE_ARRAY3) - {(int rows, int cols, int num, TYPE* array)}; - -%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE lower[2][2][2])}; -%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE upper[2][2][2])}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Tensor.h" diff --git a/numpy/doc/swig/test/Vector.cxx b/numpy/doc/swig/test/Vector.cxx deleted file mode 100644 index 2c90404da..000000000 --- a/numpy/doc/swig/test/Vector.cxx +++ /dev/null @@ -1,100 +0,0 @@ -#include -#include -#include -#include "Vector.h" - -// The following macro defines a family of functions that work with 1D -// arrays with the forms -// -// TYPE SNAMELength( TYPE vector[3]); -// TYPE SNAMEProd( TYPE * series, int size); -// TYPE SNAMESum( int size, TYPE * series); -// void SNAMEReverse(TYPE array[3]); -// void SNAMEOnes( TYPE * array, 
int size); -// void SNAMEZeros( int size, TYPE * array); -// void SNAMEEOSplit(TYPE vector[3], TYPE even[3], odd[3]); -// void SNAMETwos( TYPE * twoVec, int size); -// void SNAMEThrees( int size, TYPE * threeVec); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 1D input arrays, hard-coded length -// * 1D input arrays -// * 1D input arrays, data last -// * 1D in-place arrays, hard-coded length -// * 1D in-place arrays -// * 1D in-place arrays, data last -// * 1D argout arrays, hard-coded length -// * 1D argout arrays -// * 1D argout arrays, data last -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Length(TYPE vector[3]) { \ - double result = 0; \ - for (int i=0; i<3; ++i) result += vector[i]*vector[i]; \ - return (TYPE)sqrt(result); \ -} \ -\ -TYPE SNAME ## Prod(TYPE * series, int size) { \ - TYPE result = 1; \ - for (int i=0; i>sys.stderr, self.typeStr, "... ", - second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.arange(9).reshape(3, 3).astype(self.typeCode) - self.assertEquals(second(matrix), 3) - - def testSecondElementFortran(self): - "Test luSplit function with a Fortran-array" - print >>sys.stderr, self.typeStr, "... ", - second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.asfortranarray(np.arange(9).reshape(3, 3), - self.typeCode) - self.assertEquals(second(matrix), 3) - - def testSecondElementObject(self): - "Test luSplit function with a Fortran-array" - print >>sys.stderr, self.typeStr, "... ", - second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.asfortranarray([[0,1,2],[3,4,5],[6,7,8]], self.typeCode) - self.assertEquals(second(matrix), 3) - -###################################################################### - -class scharTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(FortranTestCase): - def 
__init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - -class ulongTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 2D Functions of Module Matrix" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/swig/test/testMatrix.py b/numpy/doc/swig/test/testMatrix.py deleted file mode 100755 index 12061702d..000000000 --- a/numpy/doc/swig/test/testMatrix.py +++ /dev/null @@ -1,361 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -import os -import sys -import unittest - -# Import NumPy -import numpy as np -major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -import Matrix - -###################################################################### - -class MatrixTestCase(unittest.TestCase): - - def __init__(self, methodName="runTests"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDet(self): - "Test det function" - print >>sys.stderr, self.typeStr, "... 
", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7],[6,9]] - self.assertEquals(det(matrix), 30) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetBadList(self): - "Test det function with bad list" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7], ["e", "pi"]] - self.assertRaises(BadListError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetWrongDim(self): - "Test det function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [8,7] - self.assertRaises(TypeError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetWrongSize(self): - "Test det function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7,6], [5,4,3], [2,1,0]] - self.assertRaises(TypeError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetNonContainer(self): - "Test det function with non-container" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - self.assertRaises(TypeError, det, None) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMax(self): - "Test max function" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,5,4],[3,2,1]] - self.assertEquals(max(matrix), 6) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxBadList(self): - "Test max function with bad list" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,"five",4], ["three", 2, "one"]] - self.assertRaises(BadListError, max, matrix) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxNonContainer(self): - "Test max function with non-container" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, None) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxWrongDim(self): - "Test max function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, 1, 2, 3]) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMin(self): - "Test min function" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [[9,8],[7,6],[5,4]] - self.assertEquals(min(matrix), 4) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinBadList(self): - "Test min function with bad list" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [["nine","eight"], ["seven","six"]] - self.assertRaises(BadListError, min, matrix) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinWrongDim(self): - "Test min function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [1,3,5,7,9]) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinNonContainer(self): - "Test min function with non-container" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, False) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScale(self): - "Test scale function" - print >>sys.stderr, self.typeStr, "... 
", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode) - scale(matrix,4) - self.assertEquals((matrix == [[4,8,12],[8,4,8],[12,8,4]]).all(), True) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongDim(self): - "Test scale function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([1,2,2,1],self.typeCode) - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongSize(self): - "Test scale function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2],[2,1]],self.typeCode) - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongType(self): - "Test scale function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],'c') - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleNonArray(self): - "Test scale function with non-array" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = [[1,2,3],[2,1,2],[3,2,1]] - self.assertRaises(TypeError, scale, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloor(self): - "Test floor function" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([[6,7],[8,9]],self.typeCode) - floor(matrix,7) - np.testing.assert_array_equal(matrix, np.array([[7,7],[8,9]])) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorWrongDim(self): - "Test floor function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([6,7,8,9],self.typeCode) - self.assertRaises(TypeError, floor, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorWrongType(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([[6,7], [8,9]],'c') - self.assertRaises(TypeError, floor, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorNonArray(self): - "Test floor function with non-array" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = [[6,7], [8,9]] - self.assertRaises(TypeError, floor, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeil(self): - "Test ceil function" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([[1,2],[3,4]],self.typeCode) - ceil(matrix,3) - np.testing.assert_array_equal(matrix, np.array([[1,2],[3,3]])) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilWrongDim(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([1,2,3,4],self.typeCode) - self.assertRaises(TypeError, ceil, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilWrongType(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([[1,2], [3,4]],'c') - self.assertRaises(TypeError, ceil, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilNonArray(self): - "Test ceil function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = [[1,2], [3,4]] - self.assertRaises(TypeError, ceil, matrix) - - # Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap - def testLUSplit(self): - "Test luSplit function" - print >>sys.stderr, self.typeStr, "... ", - luSplit = Matrix.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]]) - self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True) - self.assertEquals((upper == [[0,2,3],[0,0,6],[0,0,0]]).all(), True) - -###################################################################### - -class scharTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - -class ulongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "float" - 
self.typeCode = "f" - -###################################################################### - -class doubleTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 2D Functions of Module Matrix" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/swig/test/testTensor.py b/numpy/doc/swig/test/testTensor.py deleted file mode 100755 index 3d0ce097e..000000000 --- a/numpy/doc/swig/test/testTensor.py +++ /dev/null @@ -1,401 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -from math import sqrt -import os -import sys -import unittest - -# Import NumPy -import numpy as np -major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -import Tensor - -###################################################################### - -class TensorTestCase(unittest.TestCase): - - def __init__(self, methodName="runTests"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - self.result = sqrt(28.0/8) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNorm(self): - "Test norm function" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1], [2,3]], - [[3,2], [1,0]]] - if isinstance(self.result, int): - self.assertEquals(norm(tensor), self.result) - else: - self.assertAlmostEqual(norm(tensor), self.result, 6) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormBadList(self): - "Test norm function with bad list" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,"one"],[2,3]], - [[3,"two"],[1,0]]] - self.assertRaises(BadListError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormWrongDim(self): - "Test norm function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[0,1,2,3], - [3,2,1,0]] - self.assertRaises(TypeError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormWrongSize(self): - "Test norm function with wrong size" - print >>sys.stderr, self.typeStr, "... 
", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1,0], [2,3,2]], - [[3,2,3], [1,0,1]]] - self.assertRaises(TypeError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormNonContainer(self): - "Test norm function with non-container" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - self.assertRaises(TypeError, norm, None) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMax(self): - "Test max function" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,2], [3,4]], - [[5,6], [7,8]]] - self.assertEquals(max(tensor), 8) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxBadList(self): - "Test max function with bad list" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,"two"], [3,4]], - [[5,"six"], [7,8]]] - self.assertRaises(BadListError, max, tensor) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxNonContainer(self): - "Test max function with non-container" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, None) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxWrongDim(self): - "Test max function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, -1, 2, -3]) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMin(self): - "Test min function" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] - self.assertEquals(min(tensor), 2) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinBadList(self): - "Test min function with bad list" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[["nine",8], [7,6]], - [["five",4], [3,2]]] - self.assertRaises(BadListError, min, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinNonContainer(self): - "Test min function with non-container" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, True) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinWrongDim(self): - "Test min function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [[1,3],[5,7]]) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScale(self): - "Test scale function" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],self.typeCode) - scale(tensor,4) - self.assertEquals((tensor == [[[4,0,4], [0,4,0], [4,0,4]], - [[0,4,0], [4,0,4], [0,4,0]], - [[4,0,4], [0,4,0], [4,0,4]]]).all(), True) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongType(self): - "Test scale function with wrong type" - print >>sys.stderr, self.typeStr, "... 
", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],'c') - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongDim(self): - "Test scale function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[1,0,1], [0,1,0], [1,0,1], - [0,1,0], [1,0,1], [0,1,0]],self.typeCode) - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongSize(self): - "Test scale function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0], [0,1], [1,0]], - [[0,1], [1,0], [0,1]], - [[1,0], [0,1], [1,0]]],self.typeCode) - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleNonArray(self): - "Test scale function with non-array" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - self.assertRaises(TypeError, scale, True) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloor(self): - "Test floor function" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[[1,2], [3,4]], - [[5,6], [7,8]]],self.typeCode) - floor(tensor,4) - np.testing.assert_array_equal(tensor, np.array([[[4,4], [4,4]], - [[5,6], [7,8]]])) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorWrongType(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[[1,2], [3,4]], - [[5,6], [7,8]]],'c') - self.assertRaises(TypeError, floor, tensor) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorWrongDim(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode) - self.assertRaises(TypeError, floor, tensor) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorNonArray(self): - "Test floor function with non-array" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - self.assertRaises(TypeError, floor, object) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeil(self): - "Test ceil function" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[[9,8], [7,6]], - [[5,4], [3,2]]],self.typeCode) - ceil(tensor,5) - np.testing.assert_array_equal(tensor, np.array([[[5,5], [5,5]], - [[5,4], [3,2]]])) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilWrongType(self): - "Test ceil function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[[9,8], [7,6]], - [[5,4], [3,2]]],'c') - self.assertRaises(TypeError, ceil, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilWrongDim(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode) - self.assertRaises(TypeError, ceil, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilNonArray(self): - "Test ceil function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] - self.assertRaises(TypeError, ceil, tensor) - - # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap - def testLUSplit(self): - "Test luSplit function" - print >>sys.stderr, self.typeStr, "... ", - luSplit = Tensor.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[[1,1], [1,1]], - [[1,1], [1,1]]]) - self.assertEquals((lower == [[[1,1], [1,0]], - [[1,0], [0,0]]]).all(), True) - self.assertEquals((upper == [[[0,0], [0,1]], - [[0,1], [1,1]]]).all(), True) - -###################################################################### - -class scharTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - self.result = int(self.result) - -###################################################################### - -class ucharTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - self.result = int(self.result) - -###################################################################### - -class shortTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - self.result = int(self.result) - -###################################################################### - -class ushortTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - self.result = int(self.result) - -###################################################################### - -class intTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - self.result = int(self.result) - -###################################################################### - -class uintTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - self.result = int(self.result) - -###################################################################### - -class longTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - self.result = int(self.result) - -###################################################################### - -class ulongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - self.result = int(self.result) - -###################################################################### - -class longLongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - self.result = int(self.result) - -###################################################################### - -class 
ulongLongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - self.result = int(self.result) - -###################################################################### - -class floatTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 3D Functions of Module Tensor" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/swig/test/testVector.py b/numpy/doc/swig/test/testVector.py deleted file mode 100755 index 2ee918389..000000000 --- a/numpy/doc/swig/test/testVector.py +++ /dev/null @@ -1,380 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -import os -import sys -import unittest - -# Import NumPy -import numpy as np -major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -import Vector - -###################################################################### - -class VectorTestCase(unittest.TestCase): - - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLength(self): - "Test length function" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertEquals(length([5, 12, 0]), 13) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthBadList(self): - "Test length function with bad list" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(BadListError, length, [5, "twelve", 0]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthWrongSize(self): - "Test length function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [5, 12]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthWrongDim(self): - "Test length function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [[1,2], [3,4]]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthNonContainer(self): - "Test length function with non-container" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, None) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProd(self): - "Test prod function" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertEquals(prod([1,2,3,4]), 24) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdBadList(self): - "Test prod function with bad list" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]]) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdWrongDim(self): - "Test prod function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, [[1,2], [8,9]]) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdNonContainer(self): - "Test prod function with non-container" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, None) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSum(self): - "Test sum function" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertEquals(sum([5,6,7,8]), 26) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumBadList(self): - "Test sum function with bad list" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(BadListError, sum, [3,4, 5, "pi"]) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumWrongDim(self): - "Test sum function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, [[3,4], [5,6]]) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumNonContainer(self): - "Test sum function with non-container" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, True) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverse(self): - "Test reverse function" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([1,2,4],self.typeCode) - reverse(vector) - self.assertEquals((vector == [4,2,1]).all(), True) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongDim(self): - "Test reverse function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([[1,2], [3,4]],self.typeCode) - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongSize(self): - "Test reverse function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([9,8,7,6,5,4],self.typeCode) - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongType(self): - "Test reverse function with wrong type" - print >>sys.stderr, self.typeStr, "... 
", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([1,2,4],'c') - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseNonArray(self): - "Test reverse function with non-array" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - self.assertRaises(TypeError, reverse, [2,4,6]) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnes(self): - "Test ones function" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros(5,self.typeCode) - ones(vector) - np.testing.assert_array_equal(vector, np.array([1,1,1,1,1])) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesWrongDim(self): - "Test ones function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros((5,5),self.typeCode) - self.assertRaises(TypeError, ones, vector) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesWrongType(self): - "Test ones function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros((5,5),'c') - self.assertRaises(TypeError, ones, vector) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesNonArray(self): - "Test ones function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - self.assertRaises(TypeError, ones, [2,4,6,8]) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZeros(self): - "Test zeros function" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones(5,self.typeCode) - zeros(vector) - np.testing.assert_array_equal(vector, np.array([0,0,0,0,0])) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosWrongDim(self): - "Test zeros function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones((5,5),self.typeCode) - self.assertRaises(TypeError, zeros, vector) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosWrongType(self): - "Test zeros function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones(6,'c') - self.assertRaises(TypeError, zeros, vector) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosNonArray(self): - "Test zeros function with non-array" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - self.assertRaises(TypeError, zeros, [1,3,5,7,9]) - - # Test the (type ARGOUT_ARRAY1[ANY]) typemap - def testEOSplit(self): - "Test eoSplit function" - print >>sys.stderr, self.typeStr, "... ", - eoSplit = Vector.__dict__[self.typeStr + "EOSplit"] - even, odd = eoSplit([1,2,3]) - self.assertEquals((even == [1,0,3]).all(), True) - self.assertEquals((odd == [0,2,0]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testTwos(self): - "Test twos function" - print >>sys.stderr, self.typeStr, "... ", - twos = Vector.__dict__[self.typeStr + "Twos"] - vector = twos(5) - self.assertEquals((vector == [2,2,2,2,2]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testTwosNonInt(self): - "Test twos function with non-integer dimension" - print >>sys.stderr, self.typeStr, "... 
", - twos = Vector.__dict__[self.typeStr + "Twos"] - self.assertRaises(TypeError, twos, 5.0) - - # Test the (int DIM1, type* ARGOUT_ARRAY1) typemap - def testThrees(self): - "Test threes function" - print >>sys.stderr, self.typeStr, "... ", - threes = Vector.__dict__[self.typeStr + "Threes"] - vector = threes(6) - self.assertEquals((vector == [3,3,3,3,3,3]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testThreesNonInt(self): - "Test threes function with non-integer dimension" - print >>sys.stderr, self.typeStr, "... ", - threes = Vector.__dict__[self.typeStr + "Threes"] - self.assertRaises(TypeError, threes, "threes") - -###################################################################### - -class scharTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - -class ulongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(VectorTestCase): - def __init__(self, 
methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 1D Functions of Module Vector" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py new file mode 100644 index 000000000..4819e5268 --- /dev/null +++ b/numpy/doc/ufuncs.py @@ -0,0 +1,135 @@ +""" +=================== +Universal Functions +=================== + +Ufuncs are, generally speaking, mathematical functions or operations that are +applied element-by-element to the contents of an array. That is, the result +in each output array element only depends on the value in the corresponding +input array (or arrays) and on no other array elements. Numpy comes with a +large suite of ufuncs, and scipy extends that suite substantially. The simplest +example is the addition operator: :: + + >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) + array([1, 3, 2, 6]) + +The unfunc module lists all the available ufuncs in numpy. Additional ufuncts +available in xxx in scipy. Documentation on the specific ufuncs may be found +in those modules. This documentation is intended to address the more general +aspects of unfuncs common to most of them. All of the ufuncs that make use of +Python operators (e.g., +, -, etc.) have equivalent functions defined +(e.g. add() for +) + +Type coercion +============= + +What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of +two different types? What is the type of the result? Typically, the result is +the higher of the two types. For example: :: + + float32 + float64 -> float64 + int8 + int32 -> int32 + int16 + float32 -> float32 + float32 + complex64 -> complex64 + +There are some less obvious cases generally involving mixes of types +(e.g. uints, ints and floats) where equal bit sizes for each are not +capable of saving all the information in a different type of equivalent +bit size. Some examples are int32 vs float32 or uint32 vs int32. +Generally, the result is the higher type of larger size than both +(if available). So: :: + + int32 + float32 -> float64 + uint32 + int32 -> int64 + +Finally, the type coercion behavior when expressions involve Python +scalars is different than that seen for arrays. Since Python has a +limited number of types, combining a Python int with a dtype=np.int8 +array does not coerce to the higher type but instead, the type of the +array prevails. 
So the rule for Python scalars combined with arrays is +that the result will be that of the array equivalent of the Python scalar +if the Python scalar is of a higher 'kind' than the array (e.g., float +vs. int); otherwise, the resulting type will be that of the array. +For example: :: + + Python int + int8 -> int8 + Python float + int8 -> float64 + +ufunc methods +============= + +Binary ufuncs support four methods. These methods are explained in detail in xxx +(or are they? I don't see anything in the ufunc docstring that is useful). + +**.reduce(arr)** applies the binary operator repeatedly to the elements of the array, reducing them to a single result. For example: :: + + >>> np.add.reduce(np.arange(10)) # adds all elements of array + 45 + +For multidimensional arrays, the first dimension is reduced by default: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5)) + array([ 5, 7, 9, 11, 13]) + +The axis keyword can be used to specify a different axis to reduce: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) + array([10, 35]) + +**.accumulate(arr)** applies the binary operator and generates an equivalently +shaped array that contains the accumulated (running) result at each element of the +array. A couple of examples: :: + + >>> np.add.accumulate(np.arange(10)) + array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) + >>> np.multiply.accumulate(np.arange(1,9)) + array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) + +The behavior for multidimensional arrays is the same as for .reduce(), as is the use of the axis keyword. + +**.reduceat(arr,indices)** allows one to apply reduce to selected parts of an array. +It is a difficult method to understand (a short example is given below). See the documentation at: + +**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and arr2. It will work on multidimensional arrays (the shape of the result is the +concatenation of the two input shapes): :: + + >>> np.multiply.outer(np.arange(3),np.arange(4)) + array([[0, 0, 0, 0], + [0, 1, 2, 3], + [0, 2, 4, 6]]) + +Output arguments +================ + +All ufuncs accept an optional output array, which must have the expected output shape. Beware that if the output array is of a +different (and lower) type than the computed result, the results may be silently +truncated or otherwise corrupted in the downcast to the lower type. This usage +is useful when one wants to avoid creating large temporary arrays and instead +reuse the same array memory repeatedly (at the expense of not +being able to use the more convenient operator notation in expressions). Note that +when the output argument is used, the ufunc still returns a reference to the +result: :: + + >>> x = np.arange(2) + >>> np.add(np.arange(2),np.arange(2.),x) + array([0, 2]) + >>> x + array([0, 2]) + +and & or as ufuncs +================== + +Invariably, people try to use the Python 'and' and 'or' as logical operators +(and quite understandably). But these operators do not behave like normal +operators, since Python treats them specially: they cannot be +overloaded with array equivalents. Thus, using 'and' or 'or' with an array +results in an error. There are two alternatives: + + 1) use the functions logical_and() and logical_or(). + 2) use the bitwise operators & and \\|. The drawback of these is that if + the arguments to these operators are not boolean arrays, the result is + likely incorrect. On the other hand, most usages of logical_and and + logical_or are with boolean arrays. As long as one is careful, this is + a convenient way to apply these operators. Both alternatives are + illustrated below.
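+
+A brief illustration of both alternatives (the exact repr of boolean arrays may
+differ between NumPy versions): ::
+
+  >>> a = np.array([True, True, False])
+  >>> b = np.array([True, False, False])
+  >>> np.logical_and(a, b)
+  array([ True, False, False], dtype=bool)
+  >>> a & b
+  array([ True, False, False], dtype=bool)
+
+Finally, the promised small example of .reduceat(): the indices give the start
+of each segment to be reduced, so the call below sums arr[0:5] and arr[5:]
+(the indices are chosen arbitrarily for illustration): ::
+
+  >>> np.add.reduceat(np.arange(10), [0, 5])
+  array([10, 35])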
+ +""" diff --git a/numpy/doc/ufuncs.txt b/numpy/doc/ufuncs.txt deleted file mode 100644 index fa107cc21..000000000 --- a/numpy/doc/ufuncs.txt +++ /dev/null @@ -1,103 +0,0 @@ -BUFFERED General Ufunc explanation -================================== - -.. note:: - - This was implemented already, but the notes are kept here for historical - and explanatory purposes. - -We need to optimize the section of ufunc code that handles mixed-type -and misbehaved arrays. In particular, we need to fix it so that items -are not copied into the buffer if they don't have to be. - -Right now, all data is copied into the buffers (even scalars are copied -multiple times into the buffers even if they are not going to be cast). - -Some benchmarks show that this results in a significant slow-down -(factor of 4) over similar numarray code. - -The approach is therefore, to loop over the largest-dimension (just like -the NO_BUFFER) portion of the code. All arrays will either have N or -1 in this last dimension (or their would be a mis-match error). The -buffer size is B. - -If N <= B (and only if needed), we copy the entire last-dimension into -the buffer as fast as possible using the single-stride information. - -Also we only copy into output arrays if needed as well (other-wise the -output arrays are used directly in the ufunc code). - -Call the function using the appropriate strides information from all the input -arrays. Only set the strides to the element-size for arrays that will be copied. - -If N > B, then we have to do the above operation in a loop (with an extra loop -at the end with a different buffer size). - -Both of these cases are handled with the following code:: - - Compute N = quotient * B + remainder. - quotient = N / B # integer math - (store quotient + 1) as the number of innerloops - remainder = N % B # integer remainder - -On the inner-dimension we will have (quotient + 1) loops where -the size of the inner function is B for all but the last when the niter size is -remainder. - -So, the code looks very similar to NOBUFFER_LOOP except the inner loop is -replaced with:: - - for(k=0; iobj gets set to 1. Then there are two cases: - -1) The loop function is an object loop: - - Inputs: - - castbuf starts as NULL and then gets filled with new references. - - function gets called and doesn't alter the reference count in castbuf - - on the next iteration (next value of k), the casting function will - DECREF what is present in castbuf already and place a new object. - - - At the end of the inner loop (for loop over k), the final new-references - in castbuf must be DECREF'd. If its a scalar then a single DECREF suffices - Otherwise, "bufsize" DECREF's are needed (unless there was only one - loop, then "remainder" DECREF's are needed). - - Outputs: - - castbuf contains a new reference as the result of the function call. This - gets converted to the type of interest and. This new reference in castbuf - will be DECREF'd by later calls to the function. Thus, only after the - inner most loop do we need to DECREF the remaining references in castbuf. - -2) The loop function is of a different type: - - Inputs: - - - The PyObject input is copied over to buffer which receives a "borrowed" - reference. This reference is then used but not altered by the cast - call. Nothing needs to be done. - - Outputs: - - - The buffer[i] memory receives the PyObject input after the cast. This is - a new reference which will be "stolen" as it is copied over into memory. 
- The only problem is that what is presently in memory must be DECREF'd first. - - - - - -- cgit v1.2.1
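
The chunking arithmetic described in the removed notes above (N = quotient * B plus
remainder, i.e. quotient full buffers and one trailing buffer of remainder elements)
can be sketched in Python. This is only an illustration of the bookkeeping, not
NumPy's actual C implementation, and the helper name ``buffered_apply`` is made up
for the example: ::

    def buffered_apply(func, data, bufsize):
        # Process ``data`` in chunks of at most ``bufsize`` elements:
        # ``quotient`` full chunks plus one trailing chunk of ``remainder`` elements.
        quotient, remainder = divmod(len(data), bufsize)
        result = []
        for k in range(quotient + 1):
            niter = bufsize if k < quotient else remainder
            start = k * bufsize
            result.extend(func(x) for x in data[start:start + niter])
        return result

For example, ``buffered_apply(abs, [-1, 2, -3], 2)`` processes one full chunk of two
elements and a final chunk of one element, returning ``[1, 2, 3]``.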