From 5c86844c34674e3d580ac2cd12ef171e18130b13 Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Sat, 23 Aug 2008 23:17:23 +0000 Subject: Move documentation outside of source tree. Remove `doc` import from __init__. --- doc/swig/Makefile | 36 + doc/swig/README | 130 ++++ doc/swig/doc/Makefile | 51 ++ doc/swig/doc/numpy_swig.html | 1244 ++++++++++++++++++++++++++++++++ doc/swig/doc/numpy_swig.pdf | Bin 0 -> 168839 bytes doc/swig/doc/numpy_swig.txt | 950 ++++++++++++++++++++++++ doc/swig/doc/testing.html | 482 +++++++++++++ doc/swig/doc/testing.pdf | Bin 0 -> 72439 bytes doc/swig/doc/testing.txt | 173 +++++ doc/swig/numpy.i | 1634 ++++++++++++++++++++++++++++++++++++++++++ doc/swig/pyfragments.swg | 174 +++++ doc/swig/test/Array.i | 107 +++ doc/swig/test/Array1.cxx | 131 ++++ doc/swig/test/Array1.h | 55 ++ doc/swig/test/Array2.cxx | 168 +++++ doc/swig/test/Array2.h | 63 ++ doc/swig/test/Farray.cxx | 122 ++++ doc/swig/test/Farray.h | 56 ++ doc/swig/test/Farray.i | 73 ++ doc/swig/test/Fortran.cxx | 24 + doc/swig/test/Fortran.h | 21 + doc/swig/test/Fortran.i | 36 + doc/swig/test/Makefile | 34 + doc/swig/test/Matrix.cxx | 112 +++ doc/swig/test/Matrix.h | 52 ++ doc/swig/test/Matrix.i | 45 ++ doc/swig/test/Tensor.cxx | 131 ++++ doc/swig/test/Tensor.h | 52 ++ doc/swig/test/Tensor.i | 49 ++ doc/swig/test/Vector.cxx | 100 +++ doc/swig/test/Vector.h | 58 ++ doc/swig/test/Vector.i | 47 ++ doc/swig/test/setup.py | 66 ++ doc/swig/test/testArray.py | 283 ++++++++ doc/swig/test/testFarray.py | 158 ++++ doc/swig/test/testFortran.py | 169 +++++ doc/swig/test/testMatrix.py | 361 ++++++++++ doc/swig/test/testTensor.py | 401 +++++++++++ doc/swig/test/testVector.py | 380 ++++++++++ 39 files changed, 8228 insertions(+) create mode 100644 doc/swig/Makefile create mode 100644 doc/swig/README create mode 100644 doc/swig/doc/Makefile create mode 100644 doc/swig/doc/numpy_swig.html create mode 100644 doc/swig/doc/numpy_swig.pdf create mode 100644 doc/swig/doc/numpy_swig.txt create mode 100644 doc/swig/doc/testing.html create mode 100644 doc/swig/doc/testing.pdf create mode 100644 doc/swig/doc/testing.txt create mode 100644 doc/swig/numpy.i create mode 100644 doc/swig/pyfragments.swg create mode 100644 doc/swig/test/Array.i create mode 100644 doc/swig/test/Array1.cxx create mode 100644 doc/swig/test/Array1.h create mode 100644 doc/swig/test/Array2.cxx create mode 100644 doc/swig/test/Array2.h create mode 100644 doc/swig/test/Farray.cxx create mode 100644 doc/swig/test/Farray.h create mode 100644 doc/swig/test/Farray.i create mode 100644 doc/swig/test/Fortran.cxx create mode 100644 doc/swig/test/Fortran.h create mode 100644 doc/swig/test/Fortran.i create mode 100644 doc/swig/test/Makefile create mode 100644 doc/swig/test/Matrix.cxx create mode 100644 doc/swig/test/Matrix.h create mode 100644 doc/swig/test/Matrix.i create mode 100644 doc/swig/test/Tensor.cxx create mode 100644 doc/swig/test/Tensor.h create mode 100644 doc/swig/test/Tensor.i create mode 100644 doc/swig/test/Vector.cxx create mode 100644 doc/swig/test/Vector.h create mode 100644 doc/swig/test/Vector.i create mode 100755 doc/swig/test/setup.py create mode 100755 doc/swig/test/testArray.py create mode 100755 doc/swig/test/testFarray.py create mode 100644 doc/swig/test/testFortran.py create mode 100755 doc/swig/test/testMatrix.py create mode 100755 doc/swig/test/testTensor.py create mode 100755 doc/swig/test/testVector.py (limited to 'doc/swig') diff --git a/doc/swig/Makefile b/doc/swig/Makefile new file mode 100644 index 000000000..b64492f45 --- 
/dev/null +++ b/doc/swig/Makefile @@ -0,0 +1,36 @@ +# List all of the subdirectories here for recursive make +SUBDIRS = test doc + +# Default target +.PHONY : default +default: + @echo "There is no default make target for this Makefile" + @echo "Valid make targets are:" + @echo " test - Compile and run tests of numpy.i" + @echo " doc - Generate numpy.i documentation" + @echo " all - make test + doc" + @echo " clean - Remove generated files recursively" + +# Target all +.PHONY : all +all: $(SUBDIRS) + +# Target test +.PHONY : test +test: + cd $@ && make $@ + +# Target doc +.PHONY : doc +doc: + cd $@ && make + +# Target clean +.PHONY : clean +clean: + @for dir in $(SUBDIRS); do \ + echo ; \ + echo Running \'make clean\' in $$dir; \ + cd $$dir && make clean && cd ..; \ + done; \ + echo diff --git a/doc/swig/README b/doc/swig/README new file mode 100644 index 000000000..d557b305f --- /dev/null +++ b/doc/swig/README @@ -0,0 +1,130 @@ +Notes for the numpy/doc/swig directory +====================================== + +This set of files is for developing and testing file numpy.i, which is +intended to be a set of typemaps for helping SWIG interface between C +and C++ code that uses C arrays and the python module NumPy. It is +ultimately hoped that numpy.i will be included as part of the SWIG +distribution. + +Documentation +------------- +Documentation for how to use numpy.i is in the doc directory. The +primary source file here is numpy_swig.txt, a restructured text file +that documents how to use numpy.i. The Makefile in doc allows for the +conversion of numpy_swig.txt to HTML (if you have docutils installed) +and to PDF (if you have docutils and latex/pdftex installed). This +should not be necessary, however, as numpy_swig.html and +numpy_swig.pdf are stored in the repository. + +The same is true for a file called doc/testing.txt, which describes +the testing system used here. + +If you have the prerequisites installed and wish to build the HTML and +PDF documentation, this can be achieved by calling:: + + $ make doc + +from the shell. + +Testing +------- +The tests are a good example of what we are trying to do with numpy.i. +The files related to testing are are in the test subdirectory:: + + Vector.h + Vector.cxx + Vector.i + testVector.py + + Matrix.h + Matrix.cxx + Matrix.i + testMatrix.py + + Tensor.h + Tensor.cxx + Tensor.i + testTensor.py + +The header files contain prototypes for functions that illustrate the +wrapping issues we wish to address. Right now, this consists of +functions with argument signatures of the following forms. 
Vector.h:: + + (type IN_ARRAY1[ANY]) + (type* IN_ARRAY1, int DIM1) + (int DIM1, type* IN_ARRAY1) + + (type INPLACE_ARRAY1[ANY]) + (type* INPLACE_ARRAY1, int DIM1) + (int DIM1, type* INPLACE_ARRAY1) + + (type ARGOUT_ARRAY1[ANY]) + (type* ARGOUT_ARRAY1, int DIM1) + (int DIM1, type* ARGOUT_ARRAY1) + +Matrix.h:: + + (type IN_ARRAY2[ANY][ANY]) + (type* IN_ARRAY2, int DIM1, int DIM2) + (int DIM1, int DIM2, type* IN_ARRAY2) + + (type INPLACE_ARRAY2[ANY][ANY]) + (type* INPLACE_ARRAY2, int DIM1, int DIM2) + (int DIM1, int DIM2, type* INPLACE_ARRAY2) + + (type ARGOUT_ARRAY2[ANY][ANY]) + +Tensor.h:: + + (type IN_ARRAY3[ANY][ANY][ANY]) + (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) + (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) + + (type INPLACE_ARRAY3[ANY][ANY][ANY]) + (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) + (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) + + (type ARGOUT_ARRAY3[ANY][ANY][ANY]) + +These function signatures take a pointer to an array of type "type", +whose length is specified by the integer(s) DIM1 (and DIM2, and DIM3). + +The objective for the IN_ARRAY signatures is for SWIG to generate +python wrappers that take a container that constitutes a valid +argument to the numpy array constructor, and can be used to build an +array of type "type". Currently, types "signed char", "unsigned +char", "short", "unsigned short", "int", "unsigned int", "long", +"unsigned long", "long long", "unsigned long long", "float", and +"double" are supported and tested. + +The objective for the INPLACE_ARRAY signatures is for SWIG to generate +python wrappers that accept a numpy array of any of the above-listed +types. + +The source files Vector.cxx, Matrix.cxx and Tensor.cxx contain the +actual implementations of the functions described in Vector.h, +Matrix.h and Tensor.h. The python scripts testVector.py, +testMatrix.py and testTensor.py test the resulting python wrappers +using the unittest module. + +The SWIG interface files Vector.i, Matrix.i and Tensor.i are used to +generate the wrapper code. The SWIG_FILE_WITH_INIT macro allows +numpy.i to be used with multiple python modules. If it is specified, +then the %init block found in Vector.i, Matrix.i and Tensor.i are +required. The other things done in Vector.i, Matrix.i and Tensor.i +are the inclusion of the appropriate header file and numpy.i file, and +the "%apply" directives to force the functions to use the typemaps. + +The setup.py script is a standard python distutils script. It defines +_Vector, _Matrix and _Tensor extension modules and Vector, Matrix and +Tensor python modules. The Makefile automates everything, setting up +the dependencies, calling swig to generate the wrappers, and calling +setup.py to compile the wrapper code and generate the shared objects. +Targets "all" (default), "test", "doc" and "clean" are supported. The +"doc" target creates HTML documentation (with make target "html"), and +PDF documentation (with make targets "tex" and "pdf"). 
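For orientation, each SWIG interface file in the test subdirectory
follows roughly the pattern sketched below (an abridged sketch, not a
verbatim copy of Vector.i; the argument names are only illustrative)::

    // Vector.i (abridged sketch)
    %module Vector
    %{
    #define SWIG_FILE_WITH_INIT
    #include "Vector.h"
    %}

    %include "numpy.i"

    %init %{
    import_array();
    %}

    // Pair the numpy.i typemaps with the prototypes declared in Vector.h
    %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int n)};

    %include "Vector.h"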
+ +To build and run the test code, simply execute from the shell:: + + $ make test diff --git a/doc/swig/doc/Makefile b/doc/swig/doc/Makefile new file mode 100644 index 000000000..9223f0481 --- /dev/null +++ b/doc/swig/doc/Makefile @@ -0,0 +1,51 @@ +# ReStructured Text +RST2HTML = rst2html.py +RST2LATEX = rst2latex.py +RFLAGS = --generator --time +HTML_FLAGS = --no-xml-declaration +LATEX_FLAGS = +LATEX = pdflatex + +# Web pages that need to be made +WEB_PAGES = numpy_swig.html testing.html + +# LaTeX files that need to be made +LATEX_FILES = numpy_swig.tex testing.tex + +# PDF files that need to be made +PDF_FILES = numpy_swig.pdf testing.pdf + +# Default target: documentation +.PHONY : doc +doc: html pdf + +# HTML target +.PHONY : html +html: $(WEB_PAGES) + +# Rule: %.txt -> %.html +%.html: %.txt + $(RST2HTML) $(RFLAGS) $(HTML_FLAGS) $< $@ + +# LaTeX target +.PHONY : tex +tex: $(LATEX_FILES) + +# Rule: %.txt -> %.tex +%.tex: %.txt + $(RST2LATEX) $(RFLAGS) $(LATEX_FLAGS) $< $@ + +# PDF target +.PHONY : pdf +pdf: $(PDF_FILES) + +# Rule: %.tex -> %.pdf +%.pdf: %.tex + $(LATEX) $< + $(LATEX) $< + +# Clean target +.PHONY : clean +clean: + $(RM) $(LATEX_FILES) + $(RM) *.pyc *.aux *.dvi *.log *.out *~ diff --git a/doc/swig/doc/numpy_swig.html b/doc/swig/doc/numpy_swig.html new file mode 100644 index 000000000..ed127f330 --- /dev/null +++ b/doc/swig/doc/numpy_swig.html @@ -0,0 +1,1244 @@ + + + + + +numpy.i: a SWIG Interface File for NumPy + + + + + +
+

numpy.i: a SWIG Interface File for NumPy

Author:      Bill Spotz
Institution: Sandia National Laboratories
Date:        1 December, 2007

Introduction

+

The Simple Wrapper and Interface Generator (or SWIG) is a powerful tool for generating wrapper +code for interfacing to a wide variety of scripting languages. +SWIG can parse header files, and using only the code prototypes, +create an interface to the target language. But SWIG is not +omnipotent. For example, it cannot know from the prototype:

+
+double rms(double* seq, int n);
+
+

what exactly seq is. Is it a single value to be altered in-place? +Is it an array, and if so what is its length? Is it input-only? +Output-only? Input-output? SWIG cannot determine these details, +and does not attempt to do so.

+

If we designed rms, we probably made it a routine that takes an +input-only array of length n of double values called seq +and returns the root mean square. The default behavior of SWIG, +however, will be to create a wrapper function that compiles, but is +nearly impossible to use from the scripting language in the way the C +routine was intended.

+

For python, the preferred way of handling contiguous (or technically, strided) blocks of homogeneous data is with the module NumPy, which provides full object-oriented access to multidimensional arrays of data. Therefore, the most logical python interface for the rms function would be (including doc string):

+
+def rms(seq):
+    """
+    rms: return the root mean square of a sequence
+    rms(numpy.ndarray) -> double
+    rms(list) -> double
+    rms(tuple) -> double
+    """
+
+

where seq would be a NumPy array of double values, and its +length n would be extracted from seq internally before being +passed to the C routine. Even better, since NumPy supports +construction of arrays from arbitrary python sequences, seq +itself could be a nearly arbitrary sequence (so long as each element +can be converted to a double) and the wrapper code would +internally convert it to a NumPy array before extracting its data +and length.

+

SWIG allows these types of conversions to be defined via a +mechanism called typemaps. This document provides information on how +to use numpy.i, a SWIG interface file that defines a series of +typemaps intended to make the type of array-related conversions +described above relatively simple to implement. For example, suppose +that the rms function prototype defined above was in a header file +named rms.h. To obtain the python interface discussed above, +your SWIG interface file would need the following:

+
+%{
+#define SWIG_FILE_WITH_INIT
+#include "rms.h"
+%}
+
+%include "numpy.i"
+
+%init %{
+import_array();
+%}
+
+%apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)};
+%include "rms.h"
+
+

Typemaps are keyed off a list of one or more function arguments, +either by type or by type and name. We will refer to such lists as +signatures. One of the many typemaps defined by numpy.i is used +above and has the signature (double* IN_ARRAY1, int DIM1). The +argument names are intended to suggest that the double* argument +is an input array of one dimension and that the int represents +that dimension. This is precisely the pattern in the rms +prototype.

+

Most likely, no actual prototypes to be wrapped will have the argument +names IN_ARRAY1 and DIM1. We use the %apply directive to +apply the typemap for one-dimensional input arrays of type double +to the actual prototype used by rms. Using numpy.i +effectively, therefore, requires knowing what typemaps are available +and what they do.

+

A SWIG interface file that includes the SWIG directives given +above will produce wrapper code that looks something like:

+
+ 1 PyObject *_wrap_rms(PyObject *args) {
+ 2   PyObject *resultobj = 0;
+ 3   double *arg1 = (double *) 0 ;
+ 4   int arg2 ;
+ 5   double result;
+ 6   PyArrayObject *array1 = NULL ;
+ 7   int is_new_object1 = 0 ;
+ 8   PyObject * obj0 = 0 ;
+ 9
+10   if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail;
+11   {
+12     array1 = obj_to_array_contiguous_allow_conversion(
+13                  obj0, NPY_DOUBLE, &is_new_object1);
+14     npy_intp size[1] = {
+15       -1
+16     };
+17     if (!array1 || !require_dimensions(array1, 1) ||
+18         !require_size(array1, size, 1)) SWIG_fail;
+19     arg1 = (double*) array1->data;
+20     arg2 = (int) array1->dimensions[0];
+21   }
+22   result = (double)rms(arg1,arg2);
+23   resultobj = SWIG_From_double((double)(result));
+24   {
+25     if (is_new_object1 && array1) Py_DECREF(array1);
+26   }
+27   return resultobj;
+28 fail:
+29   {
+30     if (is_new_object1 && array1) Py_DECREF(array1);
+31   }
+32   return NULL;
+33 }
+
+

The typemaps from numpy.i are responsible for the following lines +of code: 12--20, 25 and 30. Line 10 parses the input to the rms +function. From the format string "O:rms", we can see that the +argument list is expected to be a single python object (specified +by the O before the colon) and whose pointer is stored in +obj0. A number of functions, supplied by numpy.i, are called +to make and check the (possible) conversion from a generic python +object to a NumPy array. These functions are explained in the +section Helper Functions, but hopefully their names are +self-explanatory. At line 12 we use obj0 to construct a NumPy +array. At line 17, we check the validity of the result: that it is +non-null and that it has a single dimension of arbitrary length. Once +these states are verified, we extract the data buffer and length in +lines 19 and 20 so that we can call the underlying C function at line +22. Line 25 performs memory management for the case where we have +created a new array that is no longer needed.

+

This code has a significant amount of error handling. Note that SWIG_fail is a macro for goto fail, referring to the label at line 28. If the user provides the wrong number of arguments, this will be caught at line 10. If construction of the NumPy array fails or produces an array with the wrong number of dimensions, these errors are caught at line 17. And finally, if an error is detected, memory is still managed correctly at line 30.

+

Note that if the C function signature was in a different order:

+
+double rms(int n, double* seq);
+
+

that SWIG would not match the typemap signature given above with +the argument list for rms. Fortunately, numpy.i has a set of +typemaps with the data pointer given last:

+
+%apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)};
+
+

This simply has the effect of switching the definitions of arg1 +and arg2 in lines 3 and 4 of the generated code above, and their +assignments in lines 19 and 20.

+
+
+

Using numpy.i

+

The numpy.i file is currently located in the numpy/docs/swig +sub-directory under the numpy installation directory. Typically, +you will want to copy it to the directory where you are developing +your wrappers. If it is ever adopted by SWIG developers, then it +will be installed in a standard place where SWIG can find it.

+

A simple module that only uses a single SWIG interface file should +include the following:

+
+%{
+#define SWIG_FILE_WITH_INIT
+%}
+%include "numpy.i"
+%init %{
+import_array();
+%}
+
+

Within a compiled python module, import_array() should only get +called once. This could be in a C/C++ file that you have written and +is linked to the module. If this is the case, then none of your +interface files should #define SWIG_FILE_WITH_INIT or call +import_array(). Or, this initialization call could be in a +wrapper file generated by SWIG from an interface file that has the +%init block as above. If this is the case, and you have more than +one SWIG interface file, then only one interface file should +#define SWIG_FILE_WITH_INIT and call import_array().
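As a sketch of the multiple-interface-file case (the module, header and
file names below are hypothetical), only the top-level interface file
defines the macro and calls import_array():

    /* my_module.i -- the one file that initializes NumPy */
    %module my_module
    %{
    #define SWIG_FILE_WITH_INIT
    #include "part_one.h"
    #include "part_two.h"
    %}

    %include "numpy.i"

    %init %{
    import_array();
    %}

    /* These helper interface files contain only %apply directives and
       %include statements; they must not repeat the initialization. */
    %include "part_one.i"
    %include "part_two.i"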

+
+
+

Available Typemaps

+

The typemap directives provided by numpy.i for arrays of different +data types, say double and int, and dimensions of different +types, say int or long, are identical to one another except +for the C and NumPy type specifications. The typemaps are +therefore implemented (typically behind the scenes) via a macro:

+
+%numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE)
+
+

that can be invoked for appropriate (DATA_TYPE, DATA_TYPECODE, +DIM_TYPE) triplets. For example:

+
+%numpy_typemaps(double, NPY_DOUBLE, int)
+%numpy_typemaps(int,    NPY_INT   , int)
+
+

The numpy.i interface file uses the %numpy_typemaps macro to +implement typemaps for the following C data types and int +dimension types:

+
+
    +
  • signed char
  • unsigned char
  • short
  • unsigned short
  • int
  • unsigned int
  • long
  • unsigned long
  • long long
  • unsigned long long
  • float
  • double
+

In the following descriptions, we reference a generic DATA_TYPE, which +could be any of the C data types listed above, and DIM_TYPE which +should be one of the many types of integers.

+

The typemap signatures are largely differentiated on the name given to +the buffer pointer. Names with FARRAY are for FORTRAN-ordered +arrays, and names with ARRAY are for C-ordered (or 1D arrays).
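For example, to wrap one hypothetical routine that expects a C-ordered
matrix and another that expects a FORTRAN-ordered matrix, the %apply
directives might look like this (the argument names are illustrative):

    %apply (double* IN_ARRAY2,  int DIM1, int DIM2) {(double* cmat, int rows, int cols)};
    %apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* fmat, int rows, int cols)};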

+
+

Input Arrays

+

Input arrays are defined as arrays of data that are passed into a +routine but are not altered in-place or returned to the user. The +python input array is therefore allowed to be almost any python +sequence (such as a list) that can be converted to the requested type +of array. The input array signatures are

+

1D:

+
+
    +
  • ( DATA_TYPE IN_ARRAY1[ANY] )
  • ( DATA_TYPE* IN_ARRAY1, int DIM1 )
  • ( int DIM1, DATA_TYPE* IN_ARRAY1 )

2D:

  • ( DATA_TYPE IN_ARRAY2[ANY][ANY] )
  • ( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )
  • ( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )

3D:

  • ( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )
  • ( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )
  • ( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )

The first signature listed, ( DATA_TYPE IN_ARRAY1[ANY] ), is for one-dimensional arrays with hard-coded dimensions. Likewise, ( DATA_TYPE IN_ARRAY2[ANY][ANY] ) is for two-dimensional arrays with hard-coded dimensions, and similarly for three-dimensional arrays.
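As a minimal sketch (the mean() function and its argument names are
hypothetical), wrapping a routine that reduces a one-dimensional input
array looks like:

    %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)};
    double mean(double* series, int length);

The resulting python wrapper then accepts any sequence that can be
converted to a one-dimensional array of double, such as a list of
numbers.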

+
+
+

In-Place Arrays

+

In-place arrays are defined as arrays that are modified in-place. The +input values may or may not be used, but the values at the time the +function returns are significant. The provided python argument +must therefore be a NumPy array of the required type. The in-place +signatures are

+

1D:

+
+
    +
  • ( DATA_TYPE INPLACE_ARRAY1[ANY] )
  • ( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )
  • ( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )

2D:

  • ( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )
  • ( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )
  • ( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )

3D:

  • ( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )
  • ( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )
  • ( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )

These typemaps now check to make sure that the INPLACE_ARRAY +arguments use native byte ordering. If not, an exception is raised.
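A minimal sketch of an in-place wrapper (the scale() function is
hypothetical):

    %apply (double* INPLACE_ARRAY1, int DIM1) {(double* data, int n)};
    void scale(double* data, int n, double factor);

Here the python caller must pass a NumPy array of double with native
byte ordering; the array is modified in place and nothing new is
returned.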

+
+
+

Argout Arrays

+

Argout arrays are arrays that appear in the input arguments in C, but are in fact output arrays. This pattern occurs often when there is more than one output variable and the single return argument is therefore not sufficient. In python, the conventional way to return multiple arguments is to pack them into a sequence (tuple, list, etc.) and return the sequence. This is what the argout typemaps do. If a wrapped function that uses these argout typemaps has more than one return argument, they are packed into a tuple or list, depending on the version of python. The python user does not pass these arrays in; they simply get returned. For the case where a dimension is specified, the python user must provide that dimension as an argument. The argout signatures are

+

1D:

+
+
    +
  • ( DATA_TYPE ARGOUT_ARRAY1[ANY] )
  • ( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )
  • ( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )

2D:

  • ( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )

3D:

  • ( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )

These are typically used in situations where in C/C++ you would allocate one or more arrays on the heap and call the function to fill in their values. In python, the arrays are allocated for you and returned as new array objects.

+

Note that we support DATA_TYPE* argout typemaps in 1D, but not 2D +or 3D. This is because of a quirk with the SWIG typemap syntax and +cannot be avoided. Note that for these types of 1D typemaps, the +python function will take a single argument representing DIM1.
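As a sketch (the arange_double() function is hypothetical), a routine
that fills a caller-supplied buffer would be wrapped as:

    %apply (double* ARGOUT_ARRAY1, int DIM1) {(double* rangevec, int n)};
    void arange_double(double* rangevec, int n);

From python this becomes arange_double(n): the wrapper takes only the
length and returns a new length-n array filled in by the C routine.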

+
+
+

Argoutview Arrays

+

Argoutview arrays are for when your C code provides you with a view of its internal data and does not require any memory to be allocated by the user. This can be dangerous. There is almost no way to guarantee that the internal data from the C code will remain in existence for the entire lifetime of the NumPy array that encapsulates it. If the user destroys the object that provides the view of the data before destroying the NumPy array, then using that array may result in bad memory references or segmentation faults. Nevertheless, there are situations, working with large data sets, where you simply have no other choice.

+

The C code to be wrapped for argoutview arrays is characterized by pointers: pointers to the dimensions and double pointers to the data, so that these values can be passed back to the user. The argoutview typemap signatures are therefore

+

1D:

+
+
    +
  • ( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )
  • ( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )

2D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )

3D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3 )
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3 )

Note that arrays with hard-coded dimensions are not supported. These +cannot follow the double pointer signatures of these typemaps.
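A sketch of the argoutview pattern (the accessor below is hypothetical):
the C function reports a pointer to its internal buffer, and the buffer
length, through pointer arguments:

    %apply (double** ARGOUTVIEW_ARRAY1, int* DIM1) {(double** data, int* length)};
    void get_internal_data(double** data, int* length);

The python wrapper then returns a NumPy array that is only a view of
the internal buffer, subject to the lifetime caveats described above.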

+
+
+

Output Arrays

+

The numpy.i interface file does not support typemaps for output +arrays, for several reasons. First, C/C++ return arguments are +limited to a single value. This prevents obtaining dimension +information in a general way. Second, arrays with hard-coded lengths +are not permitted as return arguments. In other words:

+
+double[3] newVector(double x, double y, double z);
+
+

is not legal C/C++ syntax. Therefore, we cannot provide typemaps of +the form:

+
+%typemap(out) (TYPE[ANY]);
+
+

If you run into a situation where a function or method is returning a +pointer to an array, your best bet is to write your own version of the +function to be wrapped, either with %extend for the case of class +methods or %ignore and %rename for the case of functions.
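As a hedged sketch of that workaround (the newVector() function, its
header vectors.h, and the assumption that it returns a malloc'd buffer
of length 3 are all hypothetical):

    %{
    #include <stdlib.h>
    #include "vectors.h"   /* declares double* newVector(double x, double y, double z) */
    %}
    %ignore newVector;
    %include "vectors.h"

    %apply (double ARGOUT_ARRAY1[ANY]) {(double out[ANY])};
    %rename (newVector) my_newVector;
    %inline %{
    void my_newVector(double x, double y, double z, double out[3]) {
      double* tmp = newVector(x, y, z);   /* assumed malloc'd, length 3 */
      out[0] = tmp[0]; out[1] = tmp[1]; out[2] = tmp[2];
      free(tmp);
    }
    %}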

+
+
+

Other Common Types: bool

+

Note that C++ type bool is not supported in the list in the +Available Typemaps section. NumPy bools are a single byte, while +the C++ bool is four bytes (at least on my system). Therefore:

+
+%numpy_typemaps(bool, NPY_BOOL, int)
+
+

will result in typemaps that produce code that references improper data lengths. You can implement the following macro expansion:

+
+%numpy_typemaps(bool, NPY_UINT, int)
+
+

to fix the data length problem, and Input Arrays will work fine, +but In-Place Arrays might fail type-checking.

+
+
+

Other Common Types: complex

+

Typemap conversions for complex floating-point types are also not supported automatically. This is because python and NumPy are written in C, which does not have native complex types. Both python and NumPy implement their own (essentially equivalent) struct definitions for complex variables:

+
+/* Python */
+typedef struct {double real; double imag;} Py_complex;
+
+/* NumPy */
+typedef struct {float  real, imag;} npy_cfloat;
+typedef struct {double real, imag;} npy_cdouble;
+
+

We could have implemented:

+
+%numpy_typemaps(Py_complex , NPY_CDOUBLE, int)
+%numpy_typemaps(npy_cfloat , NPY_CFLOAT , int)
+%numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int)
+
+

which would have provided automatic type conversions for arrays of +type Py_complex, npy_cfloat and npy_cdouble. However, it +seemed unlikely that there would be any independent (non-python, +non-NumPy) application code that people would be using SWIG to +generate a python interface to, that also used these definitions +for complex types. More likely, these application codes will define +their own complex types, or in the case of C++, use std::complex. +Assuming these data structures are compatible with python and +NumPy complex types, %numpy_typemap expansions as above (with +the user's complex type substituted for the first argument) should +work.
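For example, for C++ code written against std::complex, the following
expansions should work, under the assumption (true for the common
implementations) that std::complex<float> and std::complex<double> have
the same memory layout as the NumPy structs above:

    %{
    #include <complex>
    %}
    %numpy_typemaps(std::complex<float>,  NPY_CFLOAT , int)
    %numpy_typemaps(std::complex<double>, NPY_CDOUBLE, int)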

+
+
+
+

NumPy Array Scalars and SWIG

+

SWIG has sophisticated type checking for numerical types. For +example, if your C/C++ routine expects an integer as input, the code +generated by SWIG will check for both python integers and +python long integers, and raise an overflow error if the provided +python integer is too big to cast down to a C integer. With the +introduction of NumPy scalar arrays into your python code, you +might conceivably extract an integer from a NumPy array and attempt +to pass this to a SWIG-wrapped C/C++ function that expects an +int, but the SWIG type checking will not recognize the NumPy +array scalar as an integer. (Often, this does in fact work -- it +depends on whether NumPy recognizes the integer type you are using +as inheriting from the python integer type on the platform you are +using. Sometimes, this means that code that works on a 32-bit machine +will fail on a 64-bit machine.)

+

If you get a python error that looks like the following:

+
+TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int'
+
+

and the argument you are passing is an integer extracted from a NumPy array, then you have stumbled upon this problem. The solution is to modify the SWIG type conversion system to accept NumPy array scalars in addition to the standard integer types. Fortunately, this capability has been provided for you. Simply copy the file:

+
+pyfragments.swg
+
+

to the working build directory for your project, and this problem will be fixed. It is suggested that you do this anyway, as it only increases the capabilities of your python interface.

+
+

Why is There a Second File?

+

The SWIG type checking and conversion system is a complicated +combination of C macros, SWIG macros, SWIG typemaps and SWIG +fragments. Fragments are a way to conditionally insert code into your +wrapper file if it is needed, and not insert it if not needed. If +multiple typemaps require the same fragment, the fragment only gets +inserted into your wrapper code once.

+

There is a fragment for converting a python integer to a C long. There is a different fragment that converts a python integer to a C int, which calls the routine defined in the long fragment. We can make the changes we want here by changing the definition for the long fragment. SWIG determines the active definition for a fragment using a "first come, first served" system. That is, we need to define the fragment for long conversions prior to SWIG doing it internally. SWIG allows us to do this by putting our fragment definitions in the file pyfragments.swg. If we were to put the new fragment definitions in numpy.i, they would be ignored.

+
+
+
+

Helper Functions

+

The numpy.i file contains several macros and routines that it uses internally to build its typemaps. However, these functions may be useful elsewhere in your interface file. These macros and routines are implemented as fragments, which are described briefly in the previous section. If you try to use one or more of the following macros or functions, but your compiler complains that it does not recognize the symbol, then you need to force these fragments to appear in your code using:

+
+%fragment("NumPy_Fragments");
+
+

in your SWIG interface file.

+
+

Macros

+
+
+
is_array(a)
    Evaluates as true if a is non-NULL and can be cast to a
    PyArrayObject*.

array_type(a)
    Evaluates to the integer data type code of a, assuming a can be
    cast to a PyArrayObject*.

array_numdims(a)
    Evaluates to the integer number of dimensions of a, assuming a can
    be cast to a PyArrayObject*.

array_dimensions(a)
    Evaluates to an array of type npy_intp and length array_numdims(a),
    giving the lengths of all of the dimensions of a, assuming a can be
    cast to a PyArrayObject*.

array_size(a,i)
    Evaluates to the i-th dimension size of a, assuming a can be cast
    to a PyArrayObject*.

array_data(a)
    Evaluates to a pointer of type void* that points to the data buffer
    of a, assuming a can be cast to a PyArrayObject*.

array_is_contiguous(a)
    Evaluates as true if a is a contiguous array. Equivalent to
    (PyArray_ISCONTIGUOUS(a)).

array_is_native(a)
    Evaluates as true if the data buffer of a uses native byte order.
    Equivalent to (PyArray_ISNOTSWAPPED(a)).

array_is_fortran(a)
    Evaluates as true if a is FORTRAN ordered.
+
+
+
+

Routines

+
+

pytype_string()

+
+

Return type: char*

+

Arguments:

+
    +
  • PyObject* py_obj, a general python object.
  • +
+

Return a string describing the type of py_obj.

+
+

typecode_string()

+
+

Return type: char*

+

Arguments:

+
    +
  • int typecode, a NumPy integer typecode.
  • +
+

Return a string describing the type corresponding to the NumPy +typecode.

+
+

type_match()

+
+

Return type: int

+

Arguments:

+
    +
  • int actual_type, the NumPy typecode of a NumPy array.
  • +
  • int desired_type, the desired NumPy typecode.
  • +
+

Make sure that actual_type is compatible with +desired_type. For example, this allows character and +byte types, or int and long types, to match. This is now +equivalent to PyArray_EquivTypenums().

+
+

obj_to_array_no_conversion()

+
+

Return type: PyArrayObject*

+

Arguments:

+
    +
  • PyObject* input, a general python object.
  • +
  • int typecode, the desired NumPy typecode.
  • +
+

Cast input to a PyArrayObject* if legal, and ensure that +it is of type typecode. If input cannot be cast, or the +typecode is wrong, set a python error and return NULL.

+
+

obj_to_array_allow_conversion()

+
+

Return type: PyArrayObject*

+

Arguments:

+
    +
  • PyObject* input, a general python object.
  • +
  • int typecode, the desired NumPy typecode of the resulting +array.
  • +
  • int* is_new_object, returns a value of 0 if no conversion +performed, else 1.
  • +
+

Convert input to a NumPy array with the given typecode. +On success, return a valid PyArrayObject* with the correct +type. On failure, the python error string will be set and the +routine returns NULL.

+
+

make_contiguous()

+
+

Return type: PyArrayObject*

+

Arguments:

+
    +
  • PyArrayObject* ary, a NumPy array.
  • +
  • int* is_new_object, returns a value of 0 if no conversion +performed, else 1.
  • +
  • int min_dims, minimum allowable dimensions.
  • +
  • int max_dims, maximum allowable dimensions.
  • +
+

Check to see if ary is contiguous. If so, return the input +pointer and flag it as not a new object. If it is not contiguous, +create a new PyArrayObject* using the original data, flag it +as a new object and return the pointer.

+
+

obj_to_array_contiguous_allow_conversion()

+
+

Return type: PyArrayObject*

+

Arguments:

+
    +
  • PyObject* input, a general python object.
  • +
  • int typecode, the desired NumPy typecode of the resulting +array.
  • +
  • int* is_new_object, returns a value of 0 if no conversion +performed, else 1.
  • +
+

Convert input to a contiguous PyArrayObject* of the +specified type. If the input object is not a contiguous +PyArrayObject*, a new one will be created and the new object +flag will be set.

+
+

require_contiguous()

+
+

Return type: int

+

Arguments:

+
    +
  • PyArrayObject* ary, a NumPy array.
  • +
+

Test whether ary is contiguous. If so, return 1. Otherwise, +set a python error and return 0.

+
+

require_native()

+
+

Return type: int

+

Arguments:

+
    +
  • PyArray_Object* ary, a NumPy array.
  • +
+

Require that ary is not byte-swapped. If the array is not +byte-swapped, return 1. Otherwise, set a python error and +return 0.

+
+

require_dimensions()

+
+

Return type: int

+

Arguments:

+
    +
  • PyArrayObject* ary, a NumPy array.
  • +
  • int exact_dimensions, the desired number of dimensions.
  • +
+

Require ary to have a specified number of dimensions. If the +array has the specified number of dimensions, return 1. +Otherwise, set a python error and return 0.

+
+

require_dimensions_n()

+
+

Return type: int

+

Arguments:

+
    +
  • PyArrayObject* ary, a NumPy array.
  • +
  • int* exact_dimensions, an array of integers representing +acceptable numbers of dimensions.
  • +
  • int n, the length of exact_dimensions.
  • +
+

Require ary to have one of a list of specified number of +dimensions. If the array has one of the specified number of +dimensions, return 1. Otherwise, set the python error string +and return 0.

+
+

require_size()

+
+

Return type: int

+

Arguments:

+
    +
  • PyArrayObject* ary, a NumPy array.
  • +
  • npy_int* size, an array representing the desired lengths of +each dimension.
  • +
  • int n, the length of size.
  • +
+

Require ary to have a specified shape. If the array has the +specified shape, return 1. Otherwise, set the python error +string and return 0.

+
+

require_fortran()

+
+

Return type: int

+

Arguments:

+
    +
  • PyArrayObject* ary, a NumPy array.
  • +
+

Require the given PyArrayObject to be FORTRAN ordered. If the PyArrayObject is already FORTRAN ordered, do nothing. Otherwise, set the FORTRAN ordering flag and recompute the strides.

+
+
+
+
+
+

Beyond the Provided Typemaps

+

There are many C or C++ array/NumPy array situations not covered by +a simple %include "numpy.i" and subsequent %apply directives.

+
+

A Common Example

+

Consider a reasonable prototype for a dot product function:

+
+double dot(int len, double* vec1, double* vec2);
+
+

The python interface that we want is:

+
+def dot(vec1, vec2):
+    """
+    dot(PyObject,PyObject) -> double
+    """
+
+

The problem here is that there is one dimension argument and two array +arguments, and our typemaps are set up for dimensions that apply to a +single array (in fact, SWIG does not provide a mechanism for +associating len with vec2 that takes two python input +arguments). The recommended solution is the following:

+
+%apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1),
+                                      (int len2, double* vec2)}
+%rename (dot) my_dot;
+%exception my_dot {
+    $action
+    if (PyErr_Occurred()) SWIG_fail;
+}
+%inline %{
+double my_dot(int len1, double* vec1, int len2, double* vec2) {
+    if (len1 != len2) {
+        PyErr_Format(PyExc_ValueError,
+                     "Arrays of lengths (%d,%d) given",
+                     len1, len2);
+        return 0.0;
+    }
+    return dot(len1, vec1, vec2);
+}
+%}
+
+

If the header file that contains the prototype for double dot() +also contains other prototypes that you want to wrap, so that you need +to %include this header file, then you will also need a %ignore +dot; directive, placed after the %rename and before the +%include directives. Or, if the function in question is a class +method, you will want to use %extend rather than %inline in +addition to %ignore.

+

A note on error handling: Note that my_dot returns a +double but that it can also raise a python error. The +resulting wrapper function will return a python float +representation of 0.0 when the vector lengths do not match. Since +this is not NULL, the python interpreter will not know to check +for an error. For this reason, we add the %exception directive +above for my_dot to get the behavior we want (note that +$action is a macro that gets expanded to a valid call to +my_dot). In general, you will probably want to write a SWIG +macro to perform this task.
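A sketch of such a macro (the name %exception_numpy is my own choice,
not something provided by numpy.i):

    %define %exception_numpy(FUNC)
    %exception FUNC {
        $action
        if (PyErr_Occurred()) SWIG_fail;
    }
    %enddef

    %exception_numpy(my_dot)

Invoking the macro once per wrapped function keeps the error-handling
boilerplate out of the interface file proper.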

+
+
+

Other Situations

+

There are other wrapping situations that you may encounter in which numpy.i can be helpful.

+
+
    +
  • In some situations, it is possible that you could use the
    %numpy_typemaps macro to implement typemaps for your own types.
    See the Other Common Types: bool or Other Common Types: complex
    sections for examples. Another situation is if your dimensions are
    of a type other than int (say long for example):

        %numpy_typemaps(double, NPY_DOUBLE, long)

  • You can use the code in numpy.i to write your own typemaps. For
    example, if you had a four-dimensional array as a function
    argument, you could cut-and-paste the appropriate three-dimensional
    typemaps into your interface file. The modifications for the
    fourth dimension would be trivial.

  • Sometimes, the best approach is to use the %extend directive to
    define new methods for your classes (or overload existing ones)
    that take a PyObject* (that either is or can be converted to a
    PyArrayObject*) instead of a pointer to a buffer. In this case,
    the helper routines in numpy.i can be very useful; a sketch of
    this approach follows this list.

  • Writing typemaps can be a bit nonintuitive. If you have specific
    questions about writing SWIG typemaps for NumPy, the developers
    of numpy.i do monitor the Numpy-discussion and Swig-user mail
    lists.
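A sketch of the %extend approach mentioned above, using routines
documented in the Helper Functions section (MyClass and its assign()
method are hypothetical):

    %extend MyClass {
      void set_data(PyObject* obj) {
        int is_new_object = 0;
        PyArrayObject* array =
          obj_to_array_contiguous_allow_conversion(obj, NPY_DOUBLE,
                                                   &is_new_object);
        if (array && require_dimensions(array, 1)) {
          $self->assign((double*) array_data(array),
                        (int) array_size(array, 0));
        }
        if (is_new_object && array) { Py_DECREF(array); }
      }
    }

If the compiler cannot find these helper symbols, force them into the
wrapper with %fragment("NumPy_Fragments"); as described in the Helper
Functions section, and consider pairing the method with an %exception
handler as in the my_dot example above.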

A Final Note

+

When you use the %apply directive, as is usually necessary to use +numpy.i, it will remain in effect until you tell SWIG that it +shouldn't be. If the arguments to the functions or methods that you +are wrapping have common names, such as length or vector, +these typemaps may get applied in situations you do not expect or +want. Therefore, it is always a good idea to add a %clear +directive after you are done with a specific typemap:

+
+%apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)}
+%include "my_header.h"
+%clear (double* vector, int length);
+
+

In general, you should target these typemap signatures specifically +where you want them, and then clear them after you are done.

+
+
+
+

Summary

+

Out of the box, numpy.i provides typemaps that support conversion +between NumPy arrays and C arrays:

+
+
    +
  • That can be one of 12 different scalar types: signed char,
    unsigned char, short, unsigned short, int, unsigned int, long,
    unsigned long, long long, unsigned long long, float and double.

  • That support 41 different argument signatures for each data type,
    including:

      • One-dimensional, two-dimensional and three-dimensional arrays.
      • Input-only, in-place, argout and argoutview behavior.
      • Hard-coded dimensions, data-buffer-then-dimensions
        specification, and dimensions-then-data-buffer specification.
      • Both C-ordering ("last dimension fastest") or FORTRAN-ordering
        ("first dimension fastest") support for 2D and 3D arrays.
+

The numpy.i interface file also provides additional tools for +wrapper developers, including:

+
+
    +
  • A SWIG macro (%numpy_typemaps) with three arguments for
    implementing the 41 argument signatures for the user's choice of
    (1) C data type, (2) NumPy data type (assuming they match), and
    (3) dimension type.

  • Nine C macros and 13 C functions that can be used to write
    specialized typemaps, extensions, or inlined functions that handle
    cases not covered by the provided typemaps.
+
+
+

Acknowledgements

+

Many people have worked to glue SWIG and NumPy together (as well +as SWIG and the predecessors of NumPy, Numeric and numarray). +The effort to standardize this work into numpy.i began at the 2005 +SciPy Conference with a conversation between +Fernando Perez and myself. Fernando collected helper functions and +typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael +Sanner. Sebastian Hasse and Georg Holzmann have also provided +additional error checking and use cases. The work of these +contributors has made this end result possible.

+
+
+ + + diff --git a/doc/swig/doc/numpy_swig.pdf b/doc/swig/doc/numpy_swig.pdf new file mode 100644 index 000000000..1d4642cf7 Binary files /dev/null and b/doc/swig/doc/numpy_swig.pdf differ diff --git a/doc/swig/doc/numpy_swig.txt b/doc/swig/doc/numpy_swig.txt new file mode 100644 index 000000000..bfde018bf --- /dev/null +++ b/doc/swig/doc/numpy_swig.txt @@ -0,0 +1,950 @@ +========================================== + numpy.i: a SWIG Interface File for NumPy +========================================== + +:Author: Bill Spotz +:Institution: Sandia National Laboratories +:Date: 1 December, 2007 + +.. contents:: + +Introduction +============ + +The Simple Wrapper and Interface Generator (or `SWIG +`_) is a powerful tool for generating wrapper +code for interfacing to a wide variety of scripting languages. +`SWIG`_ can parse header files, and using only the code prototypes, +create an interface to the target language. But `SWIG`_ is not +omnipotent. For example, it cannot know from the prototype:: + + double rms(double* seq, int n); + +what exactly ``seq`` is. Is it a single value to be altered in-place? +Is it an array, and if so what is its length? Is it input-only? +Output-only? Input-output? `SWIG`_ cannot determine these details, +and does not attempt to do so. + +If we designed ``rms``, we probably made it a routine that takes an +input-only array of length ``n`` of ``double`` values called ``seq`` +and returns the root mean square. The default behavior of `SWIG`_, +however, will be to create a wrapper function that compiles, but is +nearly impossible to use from the scripting language in the way the C +routine was intended. + +For `python `_, the preferred way of handling +contiguous (or technically, *strided*) blocks of homogeneous data is +with the module `NumPy `_, which provides full +object-oriented access to multidimensial arrays of data. Therefore, +the most logical `python`_ interface for the ``rms`` function would be +(including doc string):: + + def rms(seq): + """ + rms: return the root mean square of a sequence + rms(numpy.ndarray) -> double + rms(list) -> double + rms(tuple) -> double + """ + +where ``seq`` would be a `NumPy`_ array of ``double`` values, and its +length ``n`` would be extracted from ``seq`` internally before being +passed to the C routine. Even better, since `NumPy`_ supports +construction of arrays from arbitrary `python`_ sequences, ``seq`` +itself could be a nearly arbitrary sequence (so long as each element +can be converted to a ``double``) and the wrapper code would +internally convert it to a `NumPy`_ array before extracting its data +and length. + +`SWIG`_ allows these types of conversions to be defined via a +mechanism called typemaps. This document provides information on how +to use ``numpy.i``, a `SWIG`_ interface file that defines a series of +typemaps intended to make the type of array-related conversions +described above relatively simple to implement. For example, suppose +that the ``rms`` function prototype defined above was in a header file +named ``rms.h``. To obtain the `python`_ interface discussed above, +your `SWIG`_ interface file would need the following:: + + %{ + #define SWIG_FILE_WITH_INIT + #include "rms.h" + %} + + %include "numpy.i" + + %init %{ + import_array(); + %} + + %apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)}; + %include "rms.h" + +Typemaps are keyed off a list of one or more function arguments, +either by type or by type and name. We will refer to such lists as +*signatures*. 
One of the many typemaps defined by ``numpy.i`` is used +above and has the signature ``(double* IN_ARRAY1, int DIM1)``. The +argument names are intended to suggest that the ``double*`` argument +is an input array of one dimension and that the ``int`` represents +that dimension. This is precisely the pattern in the ``rms`` +prototype. + +Most likely, no actual prototypes to be wrapped will have the argument +names ``IN_ARRAY1`` and ``DIM1``. We use the ``%apply`` directive to +apply the typemap for one-dimensional input arrays of type ``double`` +to the actual prototype used by ``rms``. Using ``numpy.i`` +effectively, therefore, requires knowing what typemaps are available +and what they do. + +A `SWIG`_ interface file that includes the `SWIG`_ directives given +above will produce wrapper code that looks something like:: + + 1 PyObject *_wrap_rms(PyObject *args) { + 2 PyObject *resultobj = 0; + 3 double *arg1 = (double *) 0 ; + 4 int arg2 ; + 5 double result; + 6 PyArrayObject *array1 = NULL ; + 7 int is_new_object1 = 0 ; + 8 PyObject * obj0 = 0 ; + 9 + 10 if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail; + 11 { + 12 array1 = obj_to_array_contiguous_allow_conversion( + 13 obj0, NPY_DOUBLE, &is_new_object1); + 14 npy_intp size[1] = { + 15 -1 + 16 }; + 17 if (!array1 || !require_dimensions(array1, 1) || + 18 !require_size(array1, size, 1)) SWIG_fail; + 19 arg1 = (double*) array1->data; + 20 arg2 = (int) array1->dimensions[0]; + 21 } + 22 result = (double)rms(arg1,arg2); + 23 resultobj = SWIG_From_double((double)(result)); + 24 { + 25 if (is_new_object1 && array1) Py_DECREF(array1); + 26 } + 27 return resultobj; + 28 fail: + 29 { + 30 if (is_new_object1 && array1) Py_DECREF(array1); + 31 } + 32 return NULL; + 33 } + +The typemaps from ``numpy.i`` are responsible for the following lines +of code: 12--20, 25 and 30. Line 10 parses the input to the ``rms`` +function. From the format string ``"O:rms"``, we can see that the +argument list is expected to be a single `python`_ object (specified +by the ``O`` before the colon) and whose pointer is stored in +``obj0``. A number of functions, supplied by ``numpy.i``, are called +to make and check the (possible) conversion from a generic `python`_ +object to a `NumPy`_ array. These functions are explained in the +section `Helper Functions`_, but hopefully their names are +self-explanatory. At line 12 we use ``obj0`` to construct a `NumPy`_ +array. At line 17, we check the validity of the result: that it is +non-null and that it has a single dimension of arbitrary length. Once +these states are verified, we extract the data buffer and length in +lines 19 and 20 so that we can call the underlying C function at line +22. Line 25 performs memory management for the case where we have +created a new array that is no longer needed. + +This code has a significant amount of error handling. Note the +``SWIG_fail`` is a macro for ``goto fail``, refering to the label at +line 28. If the user provides the wrong number of arguments, this +will be caught at line 10. If construction of the `NumPy`_ array +fails or produces an array with the wrong number of dimensions, these +errors are caught at line 17. And finally, if an error is detected, +memory is still managed correctly at line 30. + +Note that if the C function signature was in a different order:: + + double rms(int n, double* seq); + +that `SWIG`_ would not match the typemap signature given above with +the argument list for ``rms``. 
Fortunately, ``numpy.i`` has a set of +typemaps with the data pointer given last:: + + %apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)}; + +This simply has the effect of switching the definitions of ``arg1`` +and ``arg2`` in lines 3 and 4 of the generated code above, and their +assignments in lines 19 and 20. + +Using numpy.i +============= + +The ``numpy.i`` file is currently located in the ``numpy/docs/swig`` +sub-directory under the ``numpy`` installation directory. Typically, +you will want to copy it to the directory where you are developing +your wrappers. If it is ever adopted by `SWIG`_ developers, then it +will be installed in a standard place where `SWIG`_ can find it. + +A simple module that only uses a single `SWIG`_ interface file should +include the following:: + + %{ + #define SWIG_FILE_WITH_INIT + %} + %include "numpy.i" + %init %{ + import_array(); + %} + +Within a compiled `python`_ module, ``import_array()`` should only get +called once. This could be in a C/C++ file that you have written and +is linked to the module. If this is the case, then none of your +interface files should ``#define SWIG_FILE_WITH_INIT`` or call +``import_array()``. Or, this initialization call could be in a +wrapper file generated by `SWIG`_ from an interface file that has the +``%init`` block as above. If this is the case, and you have more than +one `SWIG`_ interface file, then only one interface file should +``#define SWIG_FILE_WITH_INIT`` and call ``import_array()``. + +Available Typemaps +================== + +The typemap directives provided by ``numpy.i`` for arrays of different +data types, say ``double`` and ``int``, and dimensions of different +types, say ``int`` or ``long``, are identical to one another except +for the C and `NumPy`_ type specifications. The typemaps are +therefore implemented (typically behind the scenes) via a macro:: + + %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) + +that can be invoked for appropriate ``(DATA_TYPE, DATA_TYPECODE, +DIM_TYPE)`` triplets. For example:: + + %numpy_typemaps(double, NPY_DOUBLE, int) + %numpy_typemaps(int, NPY_INT , int) + +The ``numpy.i`` interface file uses the ``%numpy_typemaps`` macro to +implement typemaps for the following C data types and ``int`` +dimension types: + + * ``signed char`` + * ``unsigned char`` + * ``short`` + * ``unsigned short`` + * ``int`` + * ``unsigned int`` + * ``long`` + * ``unsigned long`` + * ``long long`` + * ``unsigned long long`` + * ``float`` + * ``double`` + +In the following descriptions, we reference a generic ``DATA_TYPE``, which +could be any of the C data types listed above, and ``DIM_TYPE`` which +should be one of the many types of integers. + +The typemap signatures are largely differentiated on the name given to +the buffer pointer. Names with ``FARRAY`` are for FORTRAN-ordered +arrays, and names with ``ARRAY`` are for C-ordered (or 1D arrays). + +Input Arrays +------------ + +Input arrays are defined as arrays of data that are passed into a +routine but are not altered in-place or returned to the user. The +`python`_ input array is therefore allowed to be almost any `python`_ +sequence (such as a list) that can be converted to the requested type +of array. 
The input array signatures are + +1D: + + * ``( DATA_TYPE IN_ARRAY1[ANY] )`` + * ``( DATA_TYPE* IN_ARRAY1, int DIM1 )`` + * ``( int DIM1, DATA_TYPE* IN_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` + * ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )`` + * ``( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )`` + +3D: + + * ``( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )`` + * ``( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )`` + * ``( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )`` + +The first signature listed, ``( DATA_TYPE IN_ARRAY[ANY] )`` is for +one-dimensional arrays with hard-coded dimensions. Likewise, +``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` is for two-dimensional arrays +with hard-coded dimensions, and similarly for three-dimensional. + +In-Place Arrays +--------------- + +In-place arrays are defined as arrays that are modified in-place. The +input values may or may not be used, but the values at the time the +function returns are significant. The provided `python`_ argument +must therefore be a `NumPy`_ array of the required type. The in-place +signatures are + +1D: + + * ``( DATA_TYPE INPLACE_ARRAY1[ANY] )`` + * ``( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )`` + * ``( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )`` + * ``( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )`` + * ``( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )`` + +3D: + + * ``( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )`` + * ``( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )`` + * ``( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )`` + +These typemaps now check to make sure that the ``INPLACE_ARRAY`` +arguments use native byte ordering. If not, an exception is raised. + +Argout Arrays +------------- + +Argout arrays are arrays that appear in the input arguments in C, but +are in fact output arrays. This pattern occurs often when there is +more than one output variable and the single return argument is +therefore not sufficient. In `python`_, the convential way to return +multiple arguments is to pack them into a sequence (tuple, list, etc.) +and return the sequence. This is what the argout typemaps do. If a +wrapped function that uses these argout typemaps has more than one +return argument, they are packed into a tuple or list, depending on +the version of `python`_. The `python`_ user does not pass these +arrays in, they simply get returned. For the case where a dimension +is specified, the python user must provide that dimension as an +argument. The argout signatures are + +1D: + + * ``( DATA_TYPE ARGOUT_ARRAY1[ANY] )`` + * ``( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )`` + * ``( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )`` + +3D: + + * ``( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )`` + +These are typically used in situations where in C/C++, you would +allocate a(n) array(s) on the heap, and call the function to fill the +array(s) values. 
+ +Note that we support ``DATA_TYPE*`` argout typemaps in 1D, but not 2D +or 3D. This is because of a quirk with the `SWIG`_ typemap syntax and +cannot be avoided. Note that for these types of 1D typemaps, the +`python`_ function will take a single argument representing ``DIM1``. + +Argoutview Arrays +----------------- + +Argoutview arrays are for when your C code provides you with a view of +its internal data and does not require any memory to be allocated by +the user. This can be dangerous. There is almost no way to guarantee +that the internal data from the C code will remain in existence for +the entire lifetime of the `NumPy`_ array that encapsulates it. If +the user destroys the object that provides the view of the data before +destroying the `NumPy`_ array, then using that array may result in bad +memory references or segmentation faults. Nevertheless, there are +situations, when working with large data sets, where you simply have no +other choice. + +The C code to be wrapped for argoutview arrays is characterized by +pointers: pointers to the dimensions and double pointers to the data, +so that these values can be passed back to the user. The argoutview +typemap signatures are therefore + +1D: + + * ``( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )`` + * ``( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )`` + * ``( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )`` + +3D: + + * ``( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)`` + * ``( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)`` + +Note that arrays with hard-coded dimensions are not supported. These +cannot follow the double pointer signatures of these typemaps. + +Output Arrays +------------- + +The ``numpy.i`` interface file does not support typemaps for output +arrays, for several reasons. First, C/C++ return arguments are +limited to a single value. This prevents obtaining dimension +information in a general way. Second, arrays with hard-coded lengths +are not permitted as return arguments. In other words:: + + double[3] newVector(double x, double y, double z); + +is not legal C/C++ syntax. Therefore, we cannot provide typemaps of +the form:: + + %typemap(out) (TYPE[ANY]); + +If you run into a situation where a function or method is returning a +pointer to an array, your best bet is to write your own version of the +function to be wrapped, either with ``%extend`` for the case of class +methods or ``%ignore`` and ``%rename`` for the case of functions. + +Other Common Types: bool +------------------------ + +Note that the C++ type ``bool`` is not supported in the list in the +`Available Typemaps`_ section. NumPy bools are a single byte, while +the C++ ``bool`` is four bytes (at least on my system). Therefore:: + + %numpy_typemaps(bool, NPY_BOOL, int) + +will result in typemaps that produce code that references +improper data lengths.
You can implement the following macro +expansion:: + + %numpy_typemaps(bool, NPY_UINT, int) + +to fix the data length problem, and `Input Arrays`_ will work fine, +but `In-Place Arrays`_ might fail type-checking. + +Other Common Types: complex +--------------------------- + +Typemap conversions for complex floating-point types are also not +supported automatically. This is because `python`_ and `NumPy`_ are +written in C, which does not have native complex types. Both +`python`_ and `NumPy`_ implement their own (essentially equivalent) +``struct`` definitions for complex variables:: + + /* Python */ + typedef struct {double real; double imag;} Py_complex; + + /* NumPy */ + typedef struct {float real, imag;} npy_cfloat; + typedef struct {double real, imag;} npy_cdouble; + +We could have implemented:: + + %numpy_typemaps(Py_complex , NPY_CDOUBLE, int) + %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int) + %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int) + +which would have provided automatic type conversions for arrays of +type ``Py_complex``, ``npy_cfloat`` and ``npy_cdouble``. However, it +seemed unlikely that there would be any independent (non-`python`_, +non-`NumPy`_) application code that people would be using `SWIG`_ to +generate a `python`_ interface to, that also used these definitions +for complex types. More likely, these applications will define +their own complex types, or in the case of C++, use ``std::complex``. +Assuming these data structures are compatible with `python`_ and +`NumPy`_ complex types, ``%numpy_typemaps`` expansions as above (with +the user's complex type substituted for the first argument) should +work. + +NumPy Array Scalars and SWIG +============================ + +`SWIG`_ has sophisticated type checking for numerical types. For +example, if your C/C++ routine expects an integer as input, the code +generated by `SWIG`_ will check for both `python`_ integers and +`python`_ long integers, and raise an overflow error if the provided +`python`_ integer is too big to cast down to a C integer. With the +introduction of `NumPy`_ array scalars into your `python`_ code, you +might conceivably extract an integer from a `NumPy`_ array and attempt +to pass this to a `SWIG`_-wrapped C/C++ function that expects an +``int``, but the `SWIG`_ type checking will not recognize the `NumPy`_ +array scalar as an integer. (Often, this does in fact work -- it +depends on whether `NumPy`_ recognizes the integer type you are using +as inheriting from the `python`_ integer type on the platform you are +using. Sometimes, this means that code that works on a 32-bit machine +will fail on a 64-bit machine.) + +If you get a `python`_ error that looks like the following:: + + TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int' + +and the argument you are passing is an integer extracted from a +`NumPy`_ array, then you have stumbled upon this problem. The +solution is to modify the `SWIG`_ type conversion system to accept +`NumPy`_ array scalars in addition to the standard integer types. +Fortunately, this capability has been provided for you. Simply copy +the file:: + + pyfragments.swg + +to the working build directory for your project, and this problem will +be fixed. It is suggested that you do this anyway, as it only +increases the capabilities of your `python`_ interface.
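+
+As a sketch of the situation described above (the module name ``example`` and
+its method are hypothetical, not part of ``numpy.i``), the difference
+typically looks like::
+
+    import numpy as np
+    import example                        # a SWIG-generated module
+
+    i = np.arange(10, dtype=np.int32)[3]  # a NumPy array scalar
+    example.set_count(int(i))             # always accepted: a native python int
+    example.set_count(i)                  # may raise the TypeError above unless
+                                          # pyfragments.swg has been copied in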
+ +Why is There a Second File? +--------------------------- + +The `SWIG`_ type checking and conversion system is a complicated +combination of C macros, `SWIG`_ macros, `SWIG`_ typemaps and `SWIG`_ +fragments. Fragments are a way to conditionally insert code into your +wrapper file if it is needed, and not insert it if not needed. If +multiple typemaps require the same fragment, the fragment only gets +inserted into your wrapper code once. + +There is a fragment for converting a `python`_ integer to a C +``long``. There is a different fragment that converts a `python`_ +integer to a C ``int``, which calls the routine defined in the +``long`` fragment. We can make the changes we want here by changing +the definition for the ``long`` fragment. `SWIG`_ determines the +active definition for a fragment using a "first come, first served" +system. That is, we need to define the fragment for ``long`` +conversions prior to `SWIG`_ doing it internally. `SWIG`_ allows us +to do this by putting our fragment definitions in the file +``pyfragments.swg``. If we were to put the new fragment definitions +in ``numpy.i``, they would be ignored. + +Helper Functions +================ + +The ``numpy.i`` file contains several macros and routines that it +uses internally to build its typemaps. However, these functions may +be useful elsewhere in your interface file. These macros and routines +are implemented as fragments, which are described briefly in the +previous section. If you try to use one or more of the following +macros or functions, but your compiler complains that it does not +recognize the symbol, then you need to force these fragments to appear +in your code using:: + + %fragment("NumPy_Fragments"); + +in your `SWIG`_ interface file. + +Macros +------ + + **is_array(a)** + Evaluates as true if ``a`` is non-``NULL`` and can be cast to a + ``PyArrayObject*``. + + **array_type(a)** + Evaluates to the integer data type code of ``a``, assuming ``a`` can + be cast to a ``PyArrayObject*``. + + **array_numdims(a)** + Evaluates to the integer number of dimensions of ``a``, assuming + ``a`` can be cast to a ``PyArrayObject*``. + + **array_dimensions(a)** + Evaluates to an array of type ``npy_intp`` and length + ``array_numdims(a)``, giving the lengths of all of the dimensions + of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. + + **array_size(a,i)** + Evaluates to the ``i``-th dimension size of ``a``, assuming ``a`` + can be cast to a ``PyArrayObject*``. + + **array_data(a)** + Evaluates to a pointer of type ``void*`` that points to the data + buffer of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. + + **array_is_contiguous(a)** + Evaluates as true if ``a`` is a contiguous array. Equivalent to + ``(PyArray_ISCONTIGUOUS(a))``. + + **array_is_native(a)** + Evaluates as true if the data buffer of ``a`` uses native byte + order. Equivalent to ``(PyArray_ISNOTSWAPPED(a))``. + + **array_is_fortran(a)** + Evaluates as true if ``a`` is FORTRAN ordered. + +Routines +-------- + + **pytype_string()** + + Return type: ``char*`` + + Arguments: + + * ``PyObject* py_obj``, a general `python`_ object. + + Return a string describing the type of ``py_obj``. + + + **typecode_string()** + + Return type: ``char*`` + + Arguments: + + * ``int typecode``, a `NumPy`_ integer typecode. + + Return a string describing the type corresponding to the `NumPy`_ + ``typecode``. + + **type_match()** + + Return type: ``int`` + + Arguments: + + * ``int actual_type``, the `NumPy`_ typecode of a `NumPy`_ array.
+ + * ``int desired_type``, the desired `NumPy`_ typecode. + + Make sure that ``actual_type`` is compatible with + ``desired_type``. For example, this allows character and + byte types, or int and long types, to match. This is now + equivalent to ``PyArray_EquivTypenums()``. + + + **obj_to_array_no_conversion()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyObject* input``, a general `python`_ object. + + * ``int typecode``, the desired `NumPy`_ typecode. + + Cast ``input`` to a ``PyArrayObject*`` if legal, and ensure that + it is of type ``typecode``. If ``input`` cannot be cast, or the + ``typecode`` is wrong, set a `python`_ error and return ``NULL``. + + + **obj_to_array_allow_conversion()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyObject* input``, a general `python`_ object. + + * ``int typecode``, the desired `NumPy`_ typecode of the resulting + array. + + * ``int* is_new_object``, returns a value of 0 if no conversion + performed, else 1. + + Convert ``input`` to a `NumPy`_ array with the given ``typecode``. + On success, return a valid ``PyArrayObject*`` with the correct + type. On failure, the `python`_ error string will be set and the + routine returns ``NULL``. + + + **make_contiguous()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + * ``int* is_new_object``, returns a value of 0 if no conversion + performed, else 1. + + * ``int min_dims``, minimum allowable dimensions. + + * ``int max_dims``, maximum allowable dimensions. + + Check to see if ``ary`` is contiguous. If so, return the input + pointer and flag it as not a new object. If it is not contiguous, + create a new ``PyArrayObject*`` using the original data, flag it + as a new object and return the pointer. + + + **obj_to_array_contiguous_allow_conversion()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyObject* input``, a general `python`_ object. + + * ``int typecode``, the desired `NumPy`_ typecode of the resulting + array. + + * ``int* is_new_object``, returns a value of 0 if no conversion + performed, else 1. + + Convert ``input`` to a contiguous ``PyArrayObject*`` of the + specified type. If the input object is not a contiguous + ``PyArrayObject*``, a new one will be created and the new object + flag will be set. + + + **require_contiguous()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + Test whether ``ary`` is contiguous. If so, return 1. Otherwise, + set a `python`_ error and return 0. + + + **require_native()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + Require that ``ary`` is not byte-swapped. If the array is not + byte-swapped, return 1. Otherwise, set a `python`_ error and + return 0. + + **require_dimensions()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + * ``int exact_dimensions``, the desired number of dimensions. + + Require ``ary`` to have a specified number of dimensions. If the + array has the specified number of dimensions, return 1. + Otherwise, set a `python`_ error and return 0. + + + **require_dimensions_n()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + * ``int* exact_dimensions``, an array of integers representing + acceptable numbers of dimensions. + + * ``int n``, the length of ``exact_dimensions``. + + Require ``ary`` to have one of a list of specified numbers of + dimensions.
If the array has one of the specified numbers of + dimensions, return 1. Otherwise, set the `python`_ error string + and return 0. + + + **require_size()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + * ``npy_intp* size``, an array representing the desired lengths of + each dimension. + + * ``int n``, the length of ``size``. + + Require ``ary`` to have a specified shape. If the array has the + specified shape, return 1. Otherwise, set the `python`_ error + string and return 0. + + + **require_fortran()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a `NumPy`_ array. + + Require the given ``PyArrayObject`` to be FORTRAN ordered. If + the ``PyArrayObject`` is already FORTRAN ordered, do nothing. + Else, set the FORTRAN ordering flag and recompute the strides. + + +Beyond the Provided Typemaps +============================ + +There are many C or C++ array/`NumPy`_ array situations not covered by +a simple ``%include "numpy.i"`` and subsequent ``%apply`` directives. + +A Common Example +---------------- + +Consider a reasonable prototype for a dot product function:: + + double dot(int len, double* vec1, double* vec2); + +The `python`_ interface that we want is:: + + def dot(vec1, vec2): + """ + dot(PyObject,PyObject) -> double + """ + +The problem here is that there is one dimension argument and two array +arguments, and our typemaps are set up for dimensions that apply to a +single array (in fact, `SWIG`_ does not provide a mechanism for +associating ``len`` with ``vec2`` that takes two `python`_ input +arguments). The recommended solution is the following:: + + %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1), + (int len2, double* vec2)} + %rename (dot) my_dot; + %exception my_dot { + $action + if (PyErr_Occurred()) SWIG_fail; + } + %inline %{ + double my_dot(int len1, double* vec1, int len2, double* vec2) { + if (len1 != len2) { + PyErr_Format(PyExc_ValueError, + "Arrays of lengths (%d,%d) given", + len1, len2); + return 0.0; + } + return dot(len1, vec1, vec2); + } + %} + +If the header file that contains the prototype for ``double dot()`` +also contains other prototypes that you want to wrap, so that you need +to ``%include`` this header file, then you will also need a ``%ignore +dot;`` directive, placed after the ``%rename`` and before the +``%include`` directives. Or, if the function in question is a class +method, you will want to use ``%extend`` rather than ``%inline`` in +addition to ``%ignore``. + +**A note on error handling:** Note that ``my_dot`` returns a +``double`` but that it can also raise a `python`_ error. The +resulting wrapper function will return a `python`_ float +representation of 0.0 when the vector lengths do not match. Since +this is not ``NULL``, the `python`_ interpreter will not know to check +for an error. For this reason, we add the ``%exception`` directive +above for ``my_dot`` to get the behavior we want (note that +``$action`` is a macro that gets expanded to a valid call to +``my_dot``). In general, you will probably want to write a `SWIG`_ +macro to perform this task. + +Other Situations +---------------- + +There are other wrapping situations that you may encounter in which +``numpy.i`` may be helpful. + + * In some situations, it is possible that you could use the + ``%numpy_typemaps`` macro to implement typemaps for your own + types. See the `Other Common Types: bool`_ or `Other Common + Types: complex`_ sections for examples.
Another situation is if + your dimensions are of a type other than ``int`` (say ``long`` for + example):: + + %numpy_typemaps(double, NPY_DOUBLE, long) + + * You can use the code in ``numpy.i`` to write your own typemaps. + For example, if you had a four-dimensional array as a function + argument, you could cut-and-paste the appropriate + three-dimensional typemaps into your interface file. The + modifications for the fourth dimension would be trivial. + + * Sometimes, the best approach is to use the ``%extend`` directive + to define new methods for your classes (or overload existing ones) + that take a ``PyObject*`` (that either is or can be converted to a + ``PyArrayObject*``) instead of a pointer to a buffer. In this + case, the helper routines in ``numpy.i`` can be very useful. + + * Writing typemaps can be a bit nonintuitive. If you have specific + questions about writing `SWIG`_ typemaps for `NumPy`_, the + developers of ``numpy.i`` do monitor the Numpy-discussion and + Swig-user mail lists. + +A Final Note +------------ + +When you use the ``%apply`` directive, as is usually necessary to use +``numpy.i``, it will remain in effect until you tell `SWIG`_ that it +shouldn't be. If the arguments to the functions or methods that you +are wrapping have common names, such as ``length`` or ``vector``, +these typemaps may get applied in situations you do not expect or +want. Therefore, it is always a good idea to add a ``%clear`` +directive after you are done with a specific typemap:: + + %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)} + %include "my_header.h" + %clear (double* vector, int length); + +In general, you should target these typemap signatures specifically +where you want them, and then clear them after you are done. + +Summary +======= + +Out of the box, ``numpy.i`` provides typemaps that support conversion +between `NumPy`_ arrays and C arrays: + + * That can be one of 12 different scalar types: ``signed char``, + ``unsigned char``, ``short``, ``unsigned short``, ``int``, + ``unsigned int``, ``long``, ``unsigned long``, ``long long``, + ``unsigned long long``, ``float`` and ``double``. + + * That support 41 different argument signatures for each data type, + including: + + + One-dimensional, two-dimensional and three-dimensional arrays. + + + Input-only, in-place, argout and argoutview behavior. + + + Hard-coded dimensions, data-buffer-then-dimensions + specification, and dimensions-then-data-buffer specification. + + + Both C-ordering ("last dimension fastest") and FORTRAN-ordering + ("first dimension fastest") support for 2D and 3D arrays. + +The ``numpy.i`` interface file also provides additional tools for +wrapper developers, including: + + * A `SWIG`_ macro (``%numpy_typemaps``) with three arguments for + implementing the 41 argument signatures for the user's choice of + (1) C data type, (2) `NumPy`_ data type (assuming they match), and + (3) dimension type. + + * Nine C macros and 13 C functions that can be used to write + specialized typemaps, extensions, or inlined functions that handle + cases not covered by the provided typemaps. + +Acknowledgements +================ + +Many people have worked to glue `SWIG`_ and `NumPy`_ together (as well +as `SWIG`_ and the predecessors of `NumPy`_, Numeric and numarray). +The effort to standardize this work into ``numpy.i`` began at the 2005 +`SciPy <http://www.scipy.org>`_ Conference with a conversation between +Fernando Perez and myself.
Fernando collected helper functions and +typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael +Sanner. Sebastian Hasse and Georg Holzmann have also provided +additional error checking and use cases. The work of these +contributors has made this end result possible.
diff --git a/doc/swig/doc/testing.pdf b/doc/swig/doc/testing.pdf new file mode 100644 index 000000000..9ffcf7575 Binary files /dev/null and b/doc/swig/doc/testing.pdf differ diff --git a/doc/swig/doc/testing.txt b/doc/swig/doc/testing.txt new file mode 100644 index 000000000..bfd5218e8 --- /dev/null +++ b/doc/swig/doc/testing.txt @@ -0,0 +1,173 @@ +============================ +Testing the numpy.i Typemaps +============================ + +:Author: Bill Spotz +:Institution: Sandia National Laboratories +:Date: 6 April, 2007 + +.. contents:: + +Introduction +============ + +Writing tests for the ``numpy.i`` `SWIG <http://www.swig.org>`_ +interface file is a combinatorial headache. At present, 12 different +data types are supported, each with 23 different argument signatures, +for a total of 276 typemaps supported "out of the box". Each of these +typemaps, in turn, might require several unit tests in order to verify +expected behavior for both proper and improper inputs. Currently, +this results in 1,020 individual unit tests that are performed when +``make test`` is run in the ``numpy/docs/swig`` subdirectory. + +To facilitate this many similar unit tests, some high-level +programming techniques are employed, including C and `SWIG`_ macros, +as well as `python <http://www.python.org>`_ inheritance. The +purpose of this document is to describe the testing infrastructure +employed to verify that the ``numpy.i`` typemaps are working as +expected. + +Testing Organization +==================== + +There are three independent testing frameworks supported, for one-, +two-, and three-dimensional arrays respectively. For one-dimensional +arrays, there are two C++ files, a header and a source, named:: + + Vector.h + Vector.cxx + +that contain prototypes and code for a variety of functions that have +one-dimensional arrays as function arguments. The file:: + + Vector.i + +is a `SWIG`_ interface file that defines a python module ``Vector`` +that wraps the functions in ``Vector.h`` while utilizing the typemaps +in ``numpy.i`` to correctly handle the C arrays. + +The ``Makefile`` calls ``swig`` to generate ``Vector.py`` and +``Vector_wrap.cxx``, and also executes the ``setup.py`` script that +compiles ``Vector_wrap.cxx`` and links together the extension module +``_Vector.so`` or ``_Vector.dylib``, depending on the platform. This +extension module and the proxy file ``Vector.py`` are both placed in a +subdirectory under the ``build`` directory. + +The actual testing takes place with a `python`_ script named:: + + testVector.py + +that uses the standard `python`_ library module ``unittest``, which +performs several tests of each function defined in ``Vector.h`` for +each data type supported. + +Two-dimensional arrays are tested in exactly the same manner. The +above description applies, but with ``Matrix`` substituted for +``Vector``. For three-dimensional tests, substitute ``Tensor`` for +``Vector``. For the descriptions that follow, we will reference the +``Vector`` tests, but the same information applies to ``Matrix`` and +``Tensor`` tests. + +The command ``make test`` will ensure that all of the test software is +built and then run all three test scripts. + +Testing Header Files +==================== + +``Vector.h`` is a C++ header file that defines a C macro called +``TEST_FUNC_PROTOS`` that takes two arguments: ``TYPE``, which is a +data type name such as ``unsigned int``; and ``SNAME``, which is a +short name for the same data type with no spaces, e.g. ``uint``.
This +macro defines several function prototypes that have the prefix +``SNAME`` and have at least one argument that is an array of type +``TYPE``. Those functions that have return arguments return a +``TYPE`` value. + +``TEST_FUNC_PROTOS`` is then implemented for all of the data types +supported by ``numpy.i``: + + * ``signed char`` + * ``unsigned char`` + * ``short`` + * ``unsigned short`` + * ``int`` + * ``unsigned int`` + * ``long`` + * ``unsigned long`` + * ``long long`` + * ``unsigned long long`` + * ``float`` + * ``double`` + +Testing Source Files +==================== + +``Vector.cxx`` is a C++ source file that implements compilable code +for each of the function prototypes specified in ``Vector.h``. It +defines a C macro ``TEST_FUNCS`` that has the same arguments and works +in the same way as ``TEST_FUNC_PROTOS`` does in ``Vector.h``. +``TEST_FUNCS`` is implemented for each of the 12 data types as above. + +Testing SWIG Interface Files +============================ + +``Vector.i`` is a `SWIG`_ interface file that defines python module +``Vector``. It follows the conventions for using ``numpy.i`` as +described in the `numpy.i documentation `_. It +defines a `SWIG`_ macro ``%apply_numpy_typemaps`` that has a single +argument ``TYPE``. It uses the `SWIG`_ directive ``%apply`` as +described in the `numpy.i documentation`_ to apply the provided +typemaps to the argument signatures found in ``Vector.h``. This macro +is then implemented for all of the data types supported by +``numpy.i``. It then does a ``%include "Vector.h"`` to wrap all of +the function prototypes in ``Vector.h`` using the typemaps in +``numpy.i``. + +Testing Python Scripts +====================== + +After ``make`` is used to build the testing extension modules, +``testVector.py`` can be run to execute the tests. As with other +scripts that use ``unittest`` to facilitate unit testing, +``testVector.py`` defines a class that inherits from +``unittest.TestCase``:: + + class VectorTestCase(unittest.TestCase): + +However, this class is not run directly. Rather, it serves as a base +class to several other python classes, each one specific to a +particular data type. The ``VectorTestCase`` class stores two strings +for typing information: + + **self.typeStr** + A string that matches one of the ``SNAME`` prefixes used in + ``Vector.h`` and ``Vector.cxx``. For example, ``"double"``. + + **self.typeCode** + A short (typically single-character) string that represents a + data type in numpy and corresponds to ``self.typeStr``. For + example, if ``self.typeStr`` is ``"double"``, then + ``self.typeCode`` should be ``"d"``. + +Each test defined by the ``VectorTestCase`` class extracts the python +function it is trying to test by accessing the ``Vector`` module's +dictionary:: + + length = Vector.__dict__[self.typeStr + "Length"] + +In the case of double precision tests, this will return the python +function ``Vector.doubleLength``. + +We then define a new test case class for each supported data type with +a short definition such as:: + + class doubleTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + +Each of these 12 classes is collected into a ``unittest.TestSuite``, +which is then executed. Errors and failures are summed together and +returned as the exit argument. Any non-zero result indicates that at +least one test did not pass. 
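+
+A condensed sketch of that collection step (not a verbatim copy of
+``testVector.py``, and assuming ``sys`` and ``unittest`` are imported at the
+top of the script) looks like::
+
+    if __name__ == "__main__":
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(doubleTestCase))
+        # ... one addTest() call for each of the 12 data-type classes ...
+        result = unittest.TextTestRunner(verbosity=2).run(suite)
+        sys.exit(len(result.errors) + len(result.failures))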
diff --git a/doc/swig/numpy.i b/doc/swig/numpy.i new file mode 100644 index 000000000..72fc4f9c4 --- /dev/null +++ b/doc/swig/numpy.i @@ -0,0 +1,1634 @@ +/* -*- C -*- (not really, but good for syntax highlighting) */ +#ifdef SWIGPYTHON + +%{ +#ifndef SWIG_FILE_WITH_INIT +# define NO_IMPORT_ARRAY +#endif +#include "stdio.h" +#include +%} + +/**********************************************************************/ + +%fragment("NumPy_Backward_Compatibility", "header") +{ +/* Support older NumPy data type names +*/ +%#if NDARRAY_VERSION < 0x01000000 +%#define NPY_BOOL PyArray_BOOL +%#define NPY_BYTE PyArray_BYTE +%#define NPY_UBYTE PyArray_UBYTE +%#define NPY_SHORT PyArray_SHORT +%#define NPY_USHORT PyArray_USHORT +%#define NPY_INT PyArray_INT +%#define NPY_UINT PyArray_UINT +%#define NPY_LONG PyArray_LONG +%#define NPY_ULONG PyArray_ULONG +%#define NPY_LONGLONG PyArray_LONGLONG +%#define NPY_ULONGLONG PyArray_ULONGLONG +%#define NPY_FLOAT PyArray_FLOAT +%#define NPY_DOUBLE PyArray_DOUBLE +%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE +%#define NPY_CFLOAT PyArray_CFLOAT +%#define NPY_CDOUBLE PyArray_CDOUBLE +%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE +%#define NPY_OBJECT PyArray_OBJECT +%#define NPY_STRING PyArray_STRING +%#define NPY_UNICODE PyArray_UNICODE +%#define NPY_VOID PyArray_VOID +%#define NPY_NTYPES PyArray_NTYPES +%#define NPY_NOTYPE PyArray_NOTYPE +%#define NPY_CHAR PyArray_CHAR +%#define NPY_USERDEF PyArray_USERDEF +%#define npy_intp intp + +%#define NPY_MAX_BYTE MAX_BYTE +%#define NPY_MIN_BYTE MIN_BYTE +%#define NPY_MAX_UBYTE MAX_UBYTE +%#define NPY_MAX_SHORT MAX_SHORT +%#define NPY_MIN_SHORT MIN_SHORT +%#define NPY_MAX_USHORT MAX_USHORT +%#define NPY_MAX_INT MAX_INT +%#define NPY_MIN_INT MIN_INT +%#define NPY_MAX_UINT MAX_UINT +%#define NPY_MAX_LONG MAX_LONG +%#define NPY_MIN_LONG MIN_LONG +%#define NPY_MAX_ULONG MAX_ULONG +%#define NPY_MAX_LONGLONG MAX_LONGLONG +%#define NPY_MIN_LONGLONG MIN_LONGLONG +%#define NPY_MAX_ULONGLONG MAX_ULONGLONG +%#define NPY_MAX_INTP MAX_INTP +%#define NPY_MIN_INTP MIN_INTP + +%#define NPY_FARRAY FARRAY +%#define NPY_F_CONTIGUOUS F_CONTIGUOUS +%#endif +} + +/**********************************************************************/ + +/* The following code originally appeared in + * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was + * translated from C++ to C by John Hunter. Bill Spotz has modified + * it to fix some minor bugs, upgrade from Numeric to numpy (all + * versions), add some comments and functionality, and convert from + * direct code insertion to SWIG fragments. + */ + +%fragment("NumPy_Macros", "header") +{ +/* Macros to extract array attributes. + */ +%#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) +%#define array_type(a) (int)(PyArray_TYPE(a)) +%#define array_numdims(a) (((PyArrayObject *)a)->nd) +%#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) +%#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) +%#define array_data(a) (((PyArrayObject *)a)->data) +%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) +%#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) +%#define array_is_fortran(a) (PyArray_ISFORTRAN(a)) +} + +/**********************************************************************/ + +%fragment("NumPy_Utilities", "header") +{ + /* Given a PyObject, return a string describing its type. 
+ */ + char* pytype_string(PyObject* py_obj) { + if (py_obj == NULL ) return "C NULL value"; + if (py_obj == Py_None ) return "Python None" ; + if (PyCallable_Check(py_obj)) return "callable" ; + if (PyString_Check( py_obj)) return "string" ; + if (PyInt_Check( py_obj)) return "int" ; + if (PyFloat_Check( py_obj)) return "float" ; + if (PyDict_Check( py_obj)) return "dict" ; + if (PyList_Check( py_obj)) return "list" ; + if (PyTuple_Check( py_obj)) return "tuple" ; + if (PyFile_Check( py_obj)) return "file" ; + if (PyModule_Check( py_obj)) return "module" ; + if (PyInstance_Check(py_obj)) return "instance" ; + + return "unkown type"; + } + + /* Given a NumPy typecode, return a string describing the type. + */ + char* typecode_string(int typecode) { + static char* type_names[25] = {"bool", "byte", "unsigned byte", + "short", "unsigned short", "int", + "unsigned int", "long", "unsigned long", + "long long", "unsigned long long", + "float", "double", "long double", + "complex float", "complex double", + "complex long double", "object", + "string", "unicode", "void", "ntypes", + "notype", "char", "unknown"}; + return typecode < 24 ? type_names[typecode] : type_names[24]; + } + + /* Make sure input has correct numpy type. Allow character and byte + * to match. Also allow int and long to match. This is deprecated. + * You should use PyArray_EquivTypenums() instead. + */ + int type_match(int actual_type, int desired_type) { + return PyArray_EquivTypenums(actual_type, desired_type); + } +} + +/**********************************************************************/ + +%fragment("NumPy_Object_to_Array", "header", + fragment="NumPy_Backward_Compatibility", + fragment="NumPy_Macros", + fragment="NumPy_Utilities") +{ + /* Given a PyObject pointer, cast it to a PyArrayObject pointer if + * legal. If not, set the python error string appropriately and + * return NULL. + */ + PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) + { + PyArrayObject* ary = NULL; + if (is_array(input) && (typecode == NPY_NOTYPE || + PyArray_EquivTypenums(array_type(input), typecode))) + { + ary = (PyArrayObject*) input; + } + else if is_array(input) + { + char* desired_type = typecode_string(typecode); + char* actual_type = typecode_string(array_type(input)); + PyErr_Format(PyExc_TypeError, + "Array of type '%s' required. Array of type '%s' given", + desired_type, actual_type); + ary = NULL; + } + else + { + char * desired_type = typecode_string(typecode); + char * actual_type = pytype_string(input); + PyErr_Format(PyExc_TypeError, + "Array of type '%s' required. A '%s' was given", + desired_type, actual_type); + ary = NULL; + } + return ary; + } + + /* Convert the given PyObject to a NumPy array with the given + * typecode. On success, return a valid PyArrayObject* with the + * correct type. On failure, the python error string will be set and + * the routine returns NULL. + */ + PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, + int* is_new_object) + { + PyArrayObject* ary = NULL; + PyObject* py_obj; + if (is_array(input) && (typecode == NPY_NOTYPE || + PyArray_EquivTypenums(array_type(input),typecode))) + { + ary = (PyArrayObject*) input; + *is_new_object = 0; + } + else + { + py_obj = PyArray_FROMANY(input, typecode, 0, 0, NPY_DEFAULT); + /* If NULL, PyArray_FromObject will have set python error value.*/ + ary = (PyArrayObject*) py_obj; + *is_new_object = 1; + } + return ary; + } + + /* Given a PyArrayObject, check to see if it is contiguous. 
If so, + * return the input pointer and flag it as not a new object. If it is + * not contiguous, create a new PyArrayObject using the original data, + * flag it as a new object and return the pointer. + */ + PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, + int min_dims, int max_dims) + { + PyArrayObject* result; + if (array_is_contiguous(ary)) + { + result = ary; + *is_new_object = 0; + } + else + { + result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, + array_type(ary), + min_dims, + max_dims); + *is_new_object = 1; + } + return result; + } + + /* Given a PyArrayObject, check to see if it is Fortran-contiguous. + * If so, return the input pointer, but do not flag it as not a new + * object. If it is not Fortran-contiguous, create a new + * PyArrayObject using the original data, flag it as a new object + * and return the pointer. + */ + PyArrayObject* make_fortran(PyArrayObject* ary, int* is_new_object, + int min_dims, int max_dims) + { + PyArrayObject* result; + if (array_is_fortran(ary)) + { + result = ary; + *is_new_object = 0; + } + else + { + Py_INCREF(ary->descr); + result = (PyArrayObject*) PyArray_FromArray(ary, ary->descr, NPY_FORTRAN); + *is_new_object = 1; + } + return result; + } + + /* Convert a given PyObject to a contiguous PyArrayObject of the + * specified type. If the input object is not a contiguous + * PyArrayObject, a new one will be created and the new object flag + * will be set. + */ + PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, + int typecode, + int* is_new_object) + { + int is_new1 = 0; + int is_new2 = 0; + PyArrayObject* ary2; + PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, + &is_new1); + if (ary1) + { + ary2 = make_contiguous(ary1, &is_new2, 0, 0); + if ( is_new1 && is_new2) + { + Py_DECREF(ary1); + } + ary1 = ary2; + } + *is_new_object = is_new1 || is_new2; + return ary1; + } + + /* Convert a given PyObject to a Fortran-ordered PyArrayObject of the + * specified type. If the input object is not a Fortran-ordered + * PyArrayObject, a new one will be created and the new object flag + * will be set. + */ + PyArrayObject* obj_to_array_fortran_allow_conversion(PyObject* input, + int typecode, + int* is_new_object) + { + int is_new1 = 0; + int is_new2 = 0; + PyArrayObject* ary2; + PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, + &is_new1); + if (ary1) + { + ary2 = make_fortran(ary1, &is_new2, 0, 0); + if (is_new1 && is_new2) + { + Py_DECREF(ary1); + } + ary1 = ary2; + } + *is_new_object = is_new1 || is_new2; + return ary1; + } + +} /* end fragment */ + + +/**********************************************************************/ + +%fragment("NumPy_Array_Requirements", "header", + fragment="NumPy_Backward_Compatibility", + fragment="NumPy_Macros") +{ + /* Test whether a python object is contiguous. If array is + * contiguous, return 1. Otherwise, set the python error string and + * return 0. + */ + int require_contiguous(PyArrayObject* ary) + { + int contiguous = 1; + if (!array_is_contiguous(ary)) + { + PyErr_SetString(PyExc_TypeError, + "Array must be contiguous. A non-contiguous array was given"); + contiguous = 0; + } + return contiguous; + } + + /* Require that a numpy array is not byte-swapped. If the array is + * not byte-swapped, return 1. Otherwise, set the python error string + * and return 0. 
+ */ + int require_native(PyArrayObject* ary) + { + int native = 1; + if (!array_is_native(ary)) + { + PyErr_SetString(PyExc_TypeError, + "Array must have native byteorder. " + "A byte-swapped array was given"); + native = 0; + } + return native; + } + + /* Require the given PyArrayObject to have a specified number of + * dimensions. If the array has the specified number of dimensions, + * return 1. Otherwise, set the python error string and return 0. + */ + int require_dimensions(PyArrayObject* ary, int exact_dimensions) + { + int success = 1; + if (array_numdims(ary) != exact_dimensions) + { + PyErr_Format(PyExc_TypeError, + "Array must have %d dimensions. Given array has %d dimensions", + exact_dimensions, array_numdims(ary)); + success = 0; + } + return success; + } + + /* Require the given PyArrayObject to have one of a list of specified + * number of dimensions. If the array has one of the specified number + * of dimensions, return 1. Otherwise, set the python error string + * and return 0. + */ + int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) + { + int success = 0; + int i; + char dims_str[255] = ""; + char s[255]; + for (i = 0; i < n && !success; i++) + { + if (array_numdims(ary) == exact_dimensions[i]) + { + success = 1; + } + } + if (!success) + { + for (i = 0; i < n-1; i++) + { + sprintf(s, "%d, ", exact_dimensions[i]); + strcat(dims_str,s); + } + sprintf(s, " or %d", exact_dimensions[n-1]); + strcat(dims_str,s); + PyErr_Format(PyExc_TypeError, + "Array must have %s dimensions. Given array has %d dimensions", + dims_str, array_numdims(ary)); + } + return success; + } + + /* Require the given PyArrayObject to have a specified shape. If the + * array has the specified shape, return 1. Otherwise, set the python + * error string and return 0. + */ + int require_size(PyArrayObject* ary, npy_intp* size, int n) + { + int i; + int success = 1; + int len; + char desired_dims[255] = "["; + char s[255]; + char actual_dims[255] = "["; + for(i=0; i < n;i++) + { + if (size[i] != -1 && size[i] != array_size(ary,i)) + { + success = 0; + } + } + if (!success) + { + for (i = 0; i < n; i++) + { + if (size[i] == -1) + { + sprintf(s, "*,"); + } + else + { + sprintf(s, "%ld,", (long int)size[i]); + } + strcat(desired_dims,s); + } + len = strlen(desired_dims); + desired_dims[len-1] = ']'; + for (i = 0; i < n; i++) + { + sprintf(s, "%ld,", (long int)array_size(ary,i)); + strcat(actual_dims,s); + } + len = strlen(actual_dims); + actual_dims[len-1] = ']'; + PyErr_Format(PyExc_TypeError, + "Array must have shape of %s. Given array has shape of %s", + desired_dims, actual_dims); + } + return success; + } + + /* Require the given PyArrayObject to to be FORTRAN ordered. If the + * the PyArrayObject is already FORTRAN ordered, do nothing. Else, + * set the FORTRAN ordering flag and recompute the strides. 
+ */ + int require_fortran(PyArrayObject* ary) + { + int success = 1; + int nd = array_numdims(ary); + int i; + if (array_is_fortran(ary)) return success; + /* Set the FORTRAN ordered flag */ + ary->flags = NPY_FARRAY; + /* Recompute the strides */ + ary->strides[0] = ary->strides[nd-1]; + for (i=1; i < nd; ++i) + ary->strides[i] = ary->strides[i-1] * array_size(ary,i-1); + return success; + } +} + +/* Combine all NumPy fragments into one for convenience */ +%fragment("NumPy_Fragments", "header", + fragment="NumPy_Backward_Compatibility", + fragment="NumPy_Macros", + fragment="NumPy_Utilities", + fragment="NumPy_Object_to_Array", + fragment="NumPy_Array_Requirements") { } + +/* End John Hunter translation (with modifications by Bill Spotz) + */ + +/* %numpy_typemaps() macro + * + * This macro defines a family of 41 typemaps that allow C arguments + * of the form + * + * (DATA_TYPE IN_ARRAY1[ANY]) + * (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) + * (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) + * + * (DATA_TYPE IN_ARRAY2[ANY][ANY]) + * (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) + * (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) + * + * (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) + * (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) + * (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) + * + * (DATA_TYPE INPLACE_ARRAY1[ANY]) + * (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) + * (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) + * + * (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) + * (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) + * (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) + * + * (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) + * (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) + * (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) + * + * (DATA_TYPE ARGOUT_ARRAY1[ANY]) + * (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) + * (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) + * + * (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) + * + * (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) + * + * (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) + * (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) + * + * (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) + * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) + * (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) + * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) + * + * (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) + * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) + * (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) + * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) + * + * where "DATA_TYPE" is any type supported by the NumPy module, and + * "DIM_TYPE" is any int-like type suitable for specifying dimensions. 
+ * The difference between "ARRAY" typemaps and "FARRAY" typemaps is + * that the "FARRAY" typemaps expect FORTRAN ordering of + * multidimensional arrays. In python, the dimensions will not need + * to be specified (except for the "DATA_TYPE* ARGOUT_ARRAY1" + * typemaps). The IN_ARRAYs can be a numpy array or any sequence that + * can be converted to a numpy array of the specified type. The + * INPLACE_ARRAYs must be numpy arrays of the appropriate type. The + * ARGOUT_ARRAYs will be returned as new numpy arrays of the + * appropriate type. + * + * These typemaps can be applied to existing functions using the + * %apply directive. For example: + * + * %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)}; + * double prod(double* series, int length); + * + * %apply (int DIM1, int DIM2, double* INPLACE_ARRAY2) + * {(int rows, int cols, double* matrix )}; + * void floor(int rows, int cols, double* matrix, double f); + * + * %apply (double IN_ARRAY3[ANY][ANY][ANY]) + * {(double tensor[2][2][2] )}; + * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) + * {(double low[2][2][2] )}; + * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) + * {(double upp[2][2][2] )}; + * void luSplit(double tensor[2][2][2], + * double low[2][2][2], + * double upp[2][2][2] ); + * + * or directly with + * + * double prod(double* IN_ARRAY1, int DIM1); + * + * void floor(int DIM1, int DIM2, double* INPLACE_ARRAY2, double f); + * + * void luSplit(double IN_ARRAY3[ANY][ANY][ANY], + * double ARGOUT_ARRAY3[ANY][ANY][ANY], + * double ARGOUT_ARRAY3[ANY][ANY][ANY]); + */ + +%define %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) + +/************************/ +/* Input Array Typemaps */ +/************************/ + +/* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY]) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE IN_ARRAY1[ANY]) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE IN_ARRAY1[ANY]) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[1] = { $1_dim0 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 1) || + !require_size(array, size, 1)) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} +%typemap(freearg) + (DATA_TYPE IN_ARRAY1[ANY]) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[1] = { -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 1) || + !require_size(array, size, 1)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); +} +%typemap(freearg) + (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, 
DATA_TYPE* IN_ARRAY1) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[1] = {-1}; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 1) || + !require_size(array, size, 1)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DATA_TYPE*) array_data(array); +} +%typemap(freearg) + (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY]) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE IN_ARRAY2[ANY][ANY]) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE IN_ARRAY2[ANY][ANY]) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[2] = { $1_dim0, $1_dim1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 2) || + !require_size(array, size, 2)) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} +%typemap(freearg) + (DATA_TYPE IN_ARRAY2[ANY][ANY]) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[2] = { -1, -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 2) || + !require_size(array, size, 2)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); +} +%typemap(freearg) + (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[2] = { -1, -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 2) || + !require_size(array, size, 2)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DATA_TYPE*) array_data(array); +} +%typemap(freearg) + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[2] = { -1, -1 }; + array = 
obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 2) || + !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); +} +%typemap(freearg) + (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[2] = { -1, -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 2) || + !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DATA_TYPE*) array_data(array); +} +%typemap(freearg) + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 3) || + !require_size(array, size, 3)) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} +%typemap(freearg) + (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, + * DIM_TYPE DIM3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[3] = { -1, -1, -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 3) || + !require_size(array, size, 3)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); + $4 = (DIM_TYPE) array_size(array,2); +} +%typemap(freearg) + (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, + * DATA_TYPE* IN_ARRAY3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + 
fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[3] = { -1, -1, -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 3) || + !require_size(array, size, 3)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DIM_TYPE) array_size(array,2); + $4 = (DATA_TYPE*) array_data(array); +} +%typemap(freearg) + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, + * DIM_TYPE DIM3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[3] = { -1, -1, -1 }; + array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 3) || + !require_size(array, size, 3) | !require_fortran(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); + $4 = (DIM_TYPE) array_size(array,2); +} +%typemap(freearg) + (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, + * DATA_TYPE* IN_FARRAY3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) +{ + $1 = is_array($input) || PySequence_Check($input); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) + (PyArrayObject* array=NULL, int is_new_object=0) +{ + npy_intp size[3] = { -1, -1, -1 }; + array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, + &is_new_object); + if (!array || !require_dimensions(array, 3) || + !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DIM_TYPE) array_size(array,2); + $4 = (DATA_TYPE*) array_data(array); +} +%typemap(freearg) + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) +{ + if (is_new_object$argnum && array$argnum) + { Py_DECREF(array$argnum); } +} + +/***************************/ +/* In-Place Array Typemaps */ +/***************************/ + +/* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY]) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE INPLACE_ARRAY1[ANY]) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE INPLACE_ARRAY1[ANY]) + (PyArrayObject* array=NULL) +{ + npy_intp size[1] = { $1_dim0 }; + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,1) || !require_size(array, size, 1) || + !require_contiguous(array) || !require_native(array)) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} + +/* 
Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) + (PyArrayObject* array=NULL, int i=1) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,1) || !require_contiguous(array) + || !require_native(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = 1; + for (i=0; i < array_numdims(array); ++i) $2 *= array_size(array,i); +} + +/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) + (PyArrayObject* array=NULL, int i=0) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,1) || !require_contiguous(array) + || !require_native(array)) SWIG_fail; + $1 = 1; + for (i=0; i < array_numdims(array); ++i) $1 *= array_size(array,i); + $2 = (DATA_TYPE*) array_data(array); +} + +/* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) + (PyArrayObject* array=NULL) +{ + npy_intp size[2] = { $1_dim0, $1_dim1 }; + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,2) || !require_size(array, size, 2) || + !require_contiguous(array) || !require_native(array)) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} + +/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,2) || !require_contiguous(array) + || !require_native(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,2) || !require_contiguous(array) || + !require_native(array)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DATA_TYPE*) 
array_data(array); +} + +/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,2) || !require_contiguous(array) + || !require_native(array) || !require_fortran(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,2) || !require_contiguous(array) || + !require_native(array) || !require_fortran(array)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DATA_TYPE*) array_data(array); +} + +/* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) + (PyArrayObject* array=NULL) +{ + npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,3) || !require_size(array, size, 3) || + !require_contiguous(array) || !require_native(array)) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} + +/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, + * DIM_TYPE DIM3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,3) || !require_contiguous(array) || + !require_native(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); + $4 = (DIM_TYPE) array_size(array,2); +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, + * DATA_TYPE* INPLACE_ARRAY3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) + 
(PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,3) || !require_contiguous(array) + || !require_native(array)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DIM_TYPE) array_size(array,2); + $4 = (DATA_TYPE*) array_data(array); +} + +/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, + * DIM_TYPE DIM3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,3) || !require_contiguous(array) || + !require_native(array) || !require_fortran(array)) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); + $2 = (DIM_TYPE) array_size(array,0); + $3 = (DIM_TYPE) array_size(array,1); + $4 = (DIM_TYPE) array_size(array,2); +} + +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, + * DATA_TYPE* INPLACE_FARRAY3) + */ +%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, + fragment="NumPy_Macros") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) +{ + $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), + DATA_TYPECODE); +} +%typemap(in, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) + (PyArrayObject* array=NULL) +{ + array = obj_to_array_no_conversion($input, DATA_TYPECODE); + if (!array || !require_dimensions(array,3) || !require_contiguous(array) + || !require_native(array) || !require_fortran(array)) SWIG_fail; + $1 = (DIM_TYPE) array_size(array,0); + $2 = (DIM_TYPE) array_size(array,1); + $3 = (DIM_TYPE) array_size(array,2); + $4 = (DATA_TYPE*) array_data(array); +} + +/*************************/ +/* Argout Array Typemaps */ +/*************************/ + +/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY]) + */ +%typemap(in,numinputs=0, + fragment="NumPy_Backward_Compatibility,NumPy_Macros") + (DATA_TYPE ARGOUT_ARRAY1[ANY]) + (PyObject * array = NULL) +{ + npy_intp dims[1] = { $1_dim0 }; + array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); + if (!array) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} +%typemap(argout) + (DATA_TYPE ARGOUT_ARRAY1[ANY]) +{ + $result = SWIG_Python_AppendOutput($result,array$argnum); +} + +/* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) + */ +%typemap(in,numinputs=1, + fragment="NumPy_Fragments") + (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) + (PyObject * array = NULL) +{ + npy_intp dims[1]; + if (!PyInt_Check($input)) + { + char* typestring = pytype_string($input); + PyErr_Format(PyExc_TypeError, + "Int dimension expected. 
'%s' given.", + typestring); + SWIG_fail; + } + $2 = (DIM_TYPE) PyInt_AsLong($input); + dims[0] = (npy_intp) $2; + array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); + if (!array) SWIG_fail; + $1 = (DATA_TYPE*) array_data(array); +} +%typemap(argout) + (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) +{ + $result = SWIG_Python_AppendOutput($result,array$argnum); +} + +/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) + */ +%typemap(in,numinputs=1, + fragment="NumPy_Fragments") + (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) + (PyObject * array = NULL) +{ + npy_intp dims[1]; + if (!PyInt_Check($input)) + { + char* typestring = pytype_string($input); + PyErr_Format(PyExc_TypeError, + "Int dimension expected. '%s' given.", + typestring); + SWIG_fail; + } + $1 = (DIM_TYPE) PyInt_AsLong($input); + dims[0] = (npy_intp) $1; + array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); + if (!array) SWIG_fail; + $2 = (DATA_TYPE*) array_data(array); +} +%typemap(argout) + (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) +{ + $result = SWIG_Python_AppendOutput($result,array$argnum); +} + +/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) + */ +%typemap(in,numinputs=0, + fragment="NumPy_Backward_Compatibility,NumPy_Macros") + (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) + (PyObject * array = NULL) +{ + npy_intp dims[2] = { $1_dim0, $1_dim1 }; + array = PyArray_SimpleNew(2, dims, DATA_TYPECODE); + if (!array) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} +%typemap(argout) + (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) +{ + $result = SWIG_Python_AppendOutput($result,array$argnum); +} + +/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) + */ +%typemap(in,numinputs=0, + fragment="NumPy_Backward_Compatibility,NumPy_Macros") + (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) + (PyObject * array = NULL) +{ + npy_intp dims[3] = { $1_dim0, $1_dim1, $1_dim2 }; + array = PyArray_SimpleNew(3, dims, DATA_TYPECODE); + if (!array) SWIG_fail; + $1 = ($1_ltype) array_data(array); +} +%typemap(argout) + (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) +{ + $result = SWIG_Python_AppendOutput($result,array$argnum); +} + +/*****************************/ +/* Argoutview Array Typemaps */ +/*****************************/ + +/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) + */ +%typemap(in,numinputs=0) + (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 ) + (DATA_TYPE* data_temp , DIM_TYPE dim_temp) +{ + $1 = &data_temp; + $2 = &dim_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility") + (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) +{ + npy_intp dims[1] = { *$2 }; + PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1)); + if (!array) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,array); +} + +/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) + */ +%typemap(in,numinputs=0) + (DIM_TYPE* DIM1 , DATA_TYPE** ARGOUTVIEW_ARRAY1) + (DIM_TYPE dim_temp, DATA_TYPE* data_temp ) +{ + $1 = &dim_temp; + $2 = &data_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility") + (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) +{ + npy_intp dims[1] = { *$1 }; + PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2)); + if (!array) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,array); +} + +/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) + */ +%typemap(in,numinputs=0) + (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) + (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE 
dim2_temp) +{ + $1 = &data_temp; + $2 = &dim1_temp; + $3 = &dim2_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility") + (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) +{ + npy_intp dims[2] = { *$2, *$3 }; + PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); + if (!array) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,array); +} + +/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) + */ +%typemap(in,numinputs=0) + (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_ARRAY2) + (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) +{ + $1 = &dim1_temp; + $2 = &dim2_temp; + $3 = &data_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility") + (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) +{ + npy_intp dims[2] = { *$1, *$2 }; + PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); + if (!array) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,array); +} + +/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) + */ +%typemap(in,numinputs=0) + (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) + (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp) +{ + $1 = &data_temp; + $2 = &dim1_temp; + $3 = &dim2_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) +{ + npy_intp dims[2] = { *$2, *$3 }; + PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); + PyArrayObject * array = (PyArrayObject*) obj; + if (!array || !require_fortran(array)) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,obj); +} + +/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) + */ +%typemap(in,numinputs=0) + (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_FARRAY2) + (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) +{ + $1 = &dim1_temp; + $2 = &dim2_temp; + $3 = &data_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) +{ + npy_intp dims[2] = { *$1, *$2 }; + PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); + PyArrayObject * array = (PyArrayObject*) obj; + if (!array || !require_fortran(array)) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,obj); +} + +/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, + DIM_TYPE* DIM3) + */ +%typemap(in,numinputs=0) + (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) + (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) +{ + $1 = &data_temp; + $2 = &dim1_temp; + $3 = &dim2_temp; + $4 = &dim3_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility") + (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) +{ + npy_intp dims[3] = { *$2, *$3, *$4 }; + PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); + if (!array) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,array); +} + +/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, + DATA_TYPE** ARGOUTVIEW_ARRAY3) + */ +%typemap(in,numinputs=0) + (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) + (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, 
DIM_TYPE dim3_temp, DATA_TYPE* data_temp) +{ + $1 = &dim1_temp; + $2 = &dim2_temp; + $3 = &dim3_temp; + $4 = &data_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility") + (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) +{ + npy_intp dims[3] = { *$1, *$2, *$3 }; + PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$3)); + if (!array) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,array); +} + +/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, + DIM_TYPE* DIM3) + */ +%typemap(in,numinputs=0) + (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) + (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) +{ + $1 = &data_temp; + $2 = &dim1_temp; + $3 = &dim2_temp; + $4 = &dim3_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) +{ + npy_intp dims[3] = { *$2, *$3, *$4 }; + PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); + PyArrayObject * array = (PyArrayObject*) obj; + if (!array || require_fortran(array)) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,obj); +} + +/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, + DATA_TYPE** ARGOUTVIEW_FARRAY3) + */ +%typemap(in,numinputs=0) + (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) + (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp) +{ + $1 = &dim1_temp; + $2 = &dim2_temp; + $3 = &dim3_temp; + $4 = &data_temp; +} +%typemap(argout, + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) +{ + npy_intp dims[3] = { *$1, *$2, *$3 }; + PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$3)); + PyArrayObject * array = (PyArrayObject*) obj; + if (!array || require_fortran(array)) SWIG_fail; + $result = SWIG_Python_AppendOutput($result,obj); +} + +%enddef /* %numpy_typemaps() macro */ +/* *************************************************************** */ + +/* Concrete instances of the %numpy_typemaps() macro: Each invocation + * below applies all of the typemaps above to the specified data type. + */ +%numpy_typemaps(signed char , NPY_BYTE , int) +%numpy_typemaps(unsigned char , NPY_UBYTE , int) +%numpy_typemaps(short , NPY_SHORT , int) +%numpy_typemaps(unsigned short , NPY_USHORT , int) +%numpy_typemaps(int , NPY_INT , int) +%numpy_typemaps(unsigned int , NPY_UINT , int) +%numpy_typemaps(long , NPY_LONG , int) +%numpy_typemaps(unsigned long , NPY_ULONG , int) +%numpy_typemaps(long long , NPY_LONGLONG , int) +%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int) +%numpy_typemaps(float , NPY_FLOAT , int) +%numpy_typemaps(double , NPY_DOUBLE , int) + +/* *************************************************************** + * The follow macro expansion does not work, because C++ bool is 4 + * bytes and NPY_BOOL is 1 byte + * + * %numpy_typemaps(bool, NPY_BOOL, int) + */ + +/* *************************************************************** + * On my Mac, I get the following warning for this macro expansion: + * 'swig/python detected a memory leak of type 'long double *', no destructor found.' 
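As a usage sketch (editorial, with assumed names): each %numpy_typemaps() instantiation above pairs one C type with one NumPy type code. The IN_ARRAY typemaps will convert any compatible sequence, but the INPLACE typemaps never convert, so the Python-side dtype has to match the wrapped C type exactly. Assuming a hypothetical function void twice(double* INPLACE_ARRAY1, int DIM1) wrapped in a hypothetical module named example::

  import numpy as np
  import example                     # hypothetical module wrapped with numpy.i

  a = np.zeros(4)                    # float64 <-> NPY_DOUBLE <-> C double
  example.twice(a)                   # accepted: dtype matches, modified in place

  b = np.zeros(4, dtype=np.float32)  # NPY_FLOAT <-> C float, not C double
  example.twice(b)                   # raises TypeError: INPLACE typemaps do not convert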
+ * + * %numpy_typemaps(long double, NPY_LONGDOUBLE, int) + */ + +/* *************************************************************** + * Swig complains about a syntax error for the following macro + * expansions: + * + * %numpy_typemaps(complex float, NPY_CFLOAT , int) + * + * %numpy_typemaps(complex double, NPY_CDOUBLE, int) + * + * %numpy_typemaps(complex long double, NPY_CLONGDOUBLE, int) + */ + +#endif /* SWIGPYTHON */ diff --git a/doc/swig/pyfragments.swg b/doc/swig/pyfragments.swg new file mode 100644 index 000000000..0deaa61e1 --- /dev/null +++ b/doc/swig/pyfragments.swg @@ -0,0 +1,174 @@ +/*-*- C -*-*/ + +/**********************************************************************/ + +/* For numpy versions prior to 1.0, the names of certain data types + * are different than in later versions. This fragment provides macro + * substitutions that allow us to support old and new versions of + * numpy. + */ + +%fragment("NumPy_Backward_Compatibility", "header") +{ +/* Support older NumPy data type names + */ +%#if NDARRAY_VERSION < 0x01000000 +%#define NPY_BOOL PyArray_BOOL +%#define NPY_BYTE PyArray_BYTE +%#define NPY_UBYTE PyArray_UBYTE +%#define NPY_SHORT PyArray_SHORT +%#define NPY_USHORT PyArray_USHORT +%#define NPY_INT PyArray_INT +%#define NPY_UINT PyArray_UINT +%#define NPY_LONG PyArray_LONG +%#define NPY_ULONG PyArray_ULONG +%#define NPY_LONGLONG PyArray_LONGLONG +%#define NPY_ULONGLONG PyArray_ULONGLONG +%#define NPY_FLOAT PyArray_FLOAT +%#define NPY_DOUBLE PyArray_DOUBLE +%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE +%#define NPY_CFLOAT PyArray_CFLOAT +%#define NPY_CDOUBLE PyArray_CDOUBLE +%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE +%#define NPY_OBJECT PyArray_OBJECT +%#define NPY_STRING PyArray_STRING +%#define NPY_UNICODE PyArray_UNICODE +%#define NPY_VOID PyArray_VOID +%#define NPY_NTYPES PyArray_NTYPES +%#define NPY_NOTYPE PyArray_NOTYPE +%#define NPY_CHAR PyArray_CHAR +%#define NPY_USERDEF PyArray_USERDEF +%#define npy_intp intp + +%#define NPY_MAX_BYTE MAX_BYTE +%#define NPY_MIN_BYTE MIN_BYTE +%#define NPY_MAX_UBYTE MAX_UBYTE +%#define NPY_MAX_SHORT MAX_SHORT +%#define NPY_MIN_SHORT MIN_SHORT +%#define NPY_MAX_USHORT MAX_USHORT +%#define NPY_MAX_INT MAX_INT +%#define NPY_MIN_INT MIN_INT +%#define NPY_MAX_UINT MAX_UINT +%#define NPY_MAX_LONG MAX_LONG +%#define NPY_MIN_LONG MIN_LONG +%#define NPY_MAX_ULONG MAX_ULONG +%#define NPY_MAX_LONGLONG MAX_LONGLONG +%#define NPY_MIN_LONGLONG MIN_LONGLONG +%#define NPY_MAX_ULONGLONG MAX_ULONGLONG +%#define NPY_MAX_INTP MAX_INTP +%#define NPY_MIN_INTP MIN_INTP + +%#define NPY_FARRAY FARRAY +%#define NPY_F_CONTIGUOUS F_CONTIGUOUS +%#endif +} + +/**********************************************************************/ + +/* Override the SWIG_AsVal_frag(long) fragment so that it also checks + * for numpy scalar array types. 
The code through the %#endif is + * essentially cut-and-paste from pyprimtype.swg + */ + +%fragment(SWIG_AsVal_frag(long), "header", + fragment="SWIG_CanCastAsInteger", + fragment="NumPy_Backward_Compatibility") +{ + SWIGINTERN int + SWIG_AsVal_dec(long)(PyObject * obj, long * val) + { + static PyArray_Descr * longDescr = PyArray_DescrNewFromType(NPY_LONG); + if (PyInt_Check(obj)) { + if (val) *val = PyInt_AsLong(obj); + return SWIG_OK; + } else if (PyLong_Check(obj)) { + long v = PyLong_AsLong(obj); + if (!PyErr_Occurred()) { + if (val) *val = v; + return SWIG_OK; + } else { + PyErr_Clear(); + } + } +%#ifdef SWIG_PYTHON_CAST_MODE + { + int dispatch = 0; + long v = PyInt_AsLong(obj); + if (!PyErr_Occurred()) { + if (val) *val = v; + return SWIG_AddCast(SWIG_OK); + } else { + PyErr_Clear(); + } + if (!dispatch) { + double d; + int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); + if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { + if (val) *val = (long)(d); + return res; + } + } + } +%#endif + if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; + PyArray_CastScalarToCtype(obj, (void*)val, longDescr); + return SWIG_OK; + } +} + + +/* Override the SWIG_AsVal_frag(unsigned long) fragment so that it + * also checks for numpy scalar array types. The code through the + * %#endif is essentially cut-and-paste from pyprimtype.swg + */ + +%fragment(SWIG_AsVal_frag(unsigned long),"header", + fragment="SWIG_CanCastAsInteger", + fragment="NumPy_Backward_Compatibility") +{ + SWIGINTERN int + SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val) + { + static PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG); + if (PyInt_Check(obj)) { + long v = PyInt_AsLong(obj); + if (v >= 0) { + if (val) *val = v; + return SWIG_OK; + } else { + return SWIG_OverflowError; + } + } else if (PyLong_Check(obj)) { + unsigned long v = PyLong_AsUnsignedLong(obj); + if (!PyErr_Occurred()) { + if (val) *val = v; + return SWIG_OK; + } else { + PyErr_Clear(); + } + } +%#ifdef SWIG_PYTHON_CAST_MODE + { + int dispatch = 0; + unsigned long v = PyLong_AsUnsignedLong(obj); + if (!PyErr_Occurred()) { + if (val) *val = v; + return SWIG_AddCast(SWIG_OK); + } else { + PyErr_Clear(); + } + if (!dispatch) { + double d; + int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); + if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, 0, ULONG_MAX)) { + if (val) *val = (unsigned long)(d); + return res; + } + } + } +%#endif + if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; + PyArray_CastScalarToCtype(obj, (void*)val, ulongDescr); + return SWIG_OK; + } +} diff --git a/doc/swig/test/Array.i b/doc/swig/test/Array.i new file mode 100644 index 000000000..d56dd2d1c --- /dev/null +++ b/doc/swig/test/Array.i @@ -0,0 +1,107 @@ +// -*- c++ -*- + +%module Array + +%{ +#define SWIG_FILE_WITH_INIT +#include "Array1.h" +#include "Array2.h" +%} + +// Get the NumPy typemaps +%include "../numpy.i" + + // Get the STL typemaps +%include "stl.i" + +// Handle standard exceptions +%include "exception.i" +%exception +{ + try + { + $action + } + catch (const std::invalid_argument& e) + { + SWIG_exception(SWIG_ValueError, e.what()); + } + catch (const std::out_of_range& e) + { + SWIG_exception(SWIG_IndexError, e.what()); + } +} +%init %{ + import_array(); +%} + +// Global ignores +%ignore *::operator=; +%ignore *::operator[]; + +// Apply the 1D NumPy typemaps +%apply (int DIM1 , long* INPLACE_ARRAY1) + {(int length, long* data )}; +%apply (long** ARGOUTVIEW_ARRAY1, int* DIM1 ) + {(long** data , int* length)}; + +// 
Apply the 2D NumPy typemaps +%apply (int DIM1 , int DIM2 , long* INPLACE_ARRAY2) + {(int nrows, int ncols, long* data )}; +%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_ARRAY2) + {(int* nrows, int* ncols, long** data )}; +// Note: the %apply for INPLACE_ARRAY2 above gets successfully applied +// to the constructor Array2(int nrows, int ncols, long* data), but +// does not get applied to the method Array2::resize(int nrows, int +// ncols, long* data). I have no idea why. For this reason the test +// for Apply2.resize(numpy.ndarray) in testArray.py is commented out. + +// Array1 support +%include "Array1.h" +%extend Array1 +{ + void __setitem__(int i, long v) + { + self->operator[](i) = v; + } + + long __getitem__(int i) + { + return self->operator[](i); + } + + int __len__() + { + return self->length(); + } + + std::string __str__() + { + return self->asString(); + } +} + +// Array2 support +%include "Array2.h" +%extend Array2 +{ + void __setitem__(int i, Array1 & v) + { + self->operator[](i) = v; + } + + Array1 & __getitem__(int i) + { + return self->operator[](i); + } + + int __len__() + { + return self->nrows() * self->ncols(); + } + + std::string __str__() + { + return self->asString(); + } +} diff --git a/doc/swig/test/Array1.cxx b/doc/swig/test/Array1.cxx new file mode 100644 index 000000000..0c09e02f9 --- /dev/null +++ b/doc/swig/test/Array1.cxx @@ -0,0 +1,131 @@ +#include "Array1.h" +#include +#include + +// Default/length/array constructor +Array1::Array1(int length, long* data) : + _ownData(false), _length(0), _buffer(0) +{ + resize(length, data); +} + +// Copy constructor +Array1::Array1(const Array1 & source) : + _length(source._length) +{ + allocateMemory(); + *this = source; +} + +// Destructor +Array1::~Array1() +{ + deallocateMemory(); +} + +// Assignment operator +Array1 & Array1::operator=(const Array1 & source) +{ + int len = _length < source._length ? 
_length : source._length; + for (int i=0; i < len; ++i) + { + (*this)[i] = source[i]; + } + return *this; +} + +// Equals operator +bool Array1::operator==(const Array1 & other) const +{ + if (_length != other._length) return false; + for (int i=0; i < _length; ++i) + { + if ((*this)[i] != other[i]) return false; + } + return true; +} + +// Length accessor +int Array1::length() const +{ + return _length; +} + +// Resize array +void Array1::resize(int length, long* data) +{ + if (length < 0) throw std::invalid_argument("Array1 length less than 0"); + if (length == _length) return; + deallocateMemory(); + _length = length; + if (!data) + { + allocateMemory(); + } + else + { + _ownData = false; + _buffer = data; + } +} + +// Set item accessor +long & Array1::operator[](int i) +{ + if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); + return _buffer[i]; +} + +// Get item accessor +const long & Array1::operator[](int i) const +{ + if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); + return _buffer[i]; +} + +// String output +std::string Array1::asString() const +{ + std::stringstream result; + result << "["; + for (int i=0; i < _length; ++i) + { + result << " " << _buffer[i]; + if (i < _length-1) result << ","; + } + result << " ]"; + return result.str(); +} + +// Get view +void Array1::view(long** data, int* length) const +{ + *data = _buffer; + *length = _length; +} + +// Private methods + void Array1::allocateMemory() + { + if (_length == 0) + { + _ownData = false; + _buffer = 0; + } + else + { + _ownData = true; + _buffer = new long[_length]; + } + } + + void Array1::deallocateMemory() + { + if (_ownData && _length && _buffer) + { + delete [] _buffer; + } + _ownData = false; + _length = 0; + _buffer = 0; + } diff --git a/doc/swig/test/Array1.h b/doc/swig/test/Array1.h new file mode 100644 index 000000000..754c248fc --- /dev/null +++ b/doc/swig/test/Array1.h @@ -0,0 +1,55 @@ +#ifndef ARRAY1_H +#define ARRAY1_H + +#include +#include + +class Array1 +{ +public: + + // Default/length/array constructor + Array1(int length = 0, long* data = 0); + + // Copy constructor + Array1(const Array1 & source); + + // Destructor + ~Array1(); + + // Assignment operator + Array1 & operator=(const Array1 & source); + + // Equals operator + bool operator==(const Array1 & other) const; + + // Length accessor + int length() const; + + // Resize array + void resize(int length, long* data = 0); + + // Set item accessor + long & operator[](int i); + + // Get item accessor + const long & operator[](int i) const; + + // String output + std::string asString() const; + + // Get view + void view(long** data, int* length) const; + +private: + // Members + bool _ownData; + int _length; + long * _buffer; + + // Methods + void allocateMemory(); + void deallocateMemory(); +}; + +#endif diff --git a/doc/swig/test/Array2.cxx b/doc/swig/test/Array2.cxx new file mode 100644 index 000000000..e3558f786 --- /dev/null +++ b/doc/swig/test/Array2.cxx @@ -0,0 +1,168 @@ +#include "Array2.h" +#include + +// Default constructor +Array2::Array2() : + _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) +{ } + +// Size/array constructor +Array2::Array2(int nrows, int ncols, long* data) : + _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) +{ + resize(nrows, ncols, data); +} + +// Copy constructor +Array2::Array2(const Array2 & source) : + _nrows(source._nrows), _ncols(source._ncols) +{ + _ownData = true; + allocateMemory(); + *this = source; +} + +// Destructor 
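A usage sketch of the Array1 wrapping above (not part of the patch itself): the constructor receives a NumPy array through the (int DIM1, long* INPLACE_ARRAY1) typemap, so it borrows the buffer rather than copying it, and view() hands the same memory back as an ndarray through (long** ARGOUTVIEW_ARRAY1, int* DIM1). Assuming the module builds as Array (per %module Array)::

  import numpy as np
  import Array                        # built from Array.i above

  buf = np.arange(5, dtype='l')       # 'l' is C long, the element type of Array1
  a = Array.Array1(buf)               # INPLACE_ARRAY1: Array1 borrows buf, no copy
  a[2] = 42                           # __setitem__ added by the %extend block
  v = a.view()                        # ARGOUTVIEW_ARRAY1: ndarray over the same buffer
  assert v[2] == 42 and buf[2] == 42  # buf, a and v all share one block of memory
  assert len(a) == 5                  # __len__ added by the %extend block

Since neither the Array1 nor the returned view owns the data in this sketch, the NumPy array that supplied the buffer has to outlive both.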
+Array2::~Array2() +{ + deallocateMemory(); +} + +// Assignment operator +Array2 & Array2::operator=(const Array2 & source) +{ + int nrows = _nrows < source._nrows ? _nrows : source._nrows; + int ncols = _ncols < source._ncols ? _ncols : source._ncols; + for (int i=0; i < nrows; ++i) + { + for (int j=0; j < ncols; ++j) + { + (*this)[i][j] = source[i][j]; + } + } + return *this; +} + +// Equals operator +bool Array2::operator==(const Array2 & other) const +{ + if (_nrows != other._nrows) return false; + if (_ncols != other._ncols) return false; + for (int i=0; i < _nrows; ++i) + { + for (int j=0; j < _ncols; ++j) + { + if ((*this)[i][j] != other[i][j]) return false; + } + } + return true; +} + +// Length accessors +int Array2::nrows() const +{ + return _nrows; +} + +int Array2::ncols() const +{ + return _ncols; +} + +// Resize array +void Array2::resize(int nrows, int ncols, long* data) +{ + if (nrows < 0) throw std::invalid_argument("Array2 nrows less than 0"); + if (ncols < 0) throw std::invalid_argument("Array2 ncols less than 0"); + if (nrows == _nrows && ncols == _ncols) return; + deallocateMemory(); + _nrows = nrows; + _ncols = ncols; + if (!data) + { + allocateMemory(); + } + else + { + _ownData = false; + _buffer = data; + allocateRows(); + } +} + +// Set item accessor +Array1 & Array2::operator[](int i) +{ + if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); + return _rows[i]; +} + +// Get item accessor +const Array1 & Array2::operator[](int i) const +{ + if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); + return _rows[i]; +} + +// String output +std::string Array2::asString() const +{ + std::stringstream result; + result << "[ "; + for (int i=0; i < _nrows; ++i) + { + if (i > 0) result << " "; + result << (*this)[i].asString(); + if (i < _nrows-1) result << "," << std::endl; + } + result << " ]" << std::endl; + return result.str(); +} + +// Get view +void Array2::view(int* nrows, int* ncols, long** data) const +{ + *nrows = _nrows; + *ncols = _ncols; + *data = _buffer; +} + +// Private methods +void Array2::allocateMemory() +{ + if (_nrows * _ncols == 0) + { + _ownData = false; + _buffer = 0; + _rows = 0; + } + else + { + _ownData = true; + _buffer = new long[_nrows*_ncols]; + allocateRows(); + } +} + +void Array2::allocateRows() +{ + _rows = new Array1[_nrows]; + for (int i=0; i < _nrows; ++i) + { + _rows[i].resize(_ncols, &_buffer[i*_ncols]); + } +} + +void Array2::deallocateMemory() +{ + if (_ownData && _nrows*_ncols && _buffer) + { + delete [] _rows; + delete [] _buffer; + } + _ownData = false; + _nrows = 0; + _ncols = 0; + _buffer = 0; + _rows = 0; +} diff --git a/doc/swig/test/Array2.h b/doc/swig/test/Array2.h new file mode 100644 index 000000000..a6e5bfc30 --- /dev/null +++ b/doc/swig/test/Array2.h @@ -0,0 +1,63 @@ +#ifndef ARRAY2_H +#define ARRAY2_H + +#include "Array1.h" +#include +#include + +class Array2 +{ +public: + + // Default constructor + Array2(); + + // Size/array constructor + Array2(int nrows, int ncols, long* data=0); + + // Copy constructor + Array2(const Array2 & source); + + // Destructor + ~Array2(); + + // Assignment operator + Array2 & operator=(const Array2 & source); + + // Equals operator + bool operator==(const Array2 & other) const; + + // Length accessors + int nrows() const; + int ncols() const; + + // Resize array + void resize(int ncols, int nrows, long* data=0); + + // Set item accessor + Array1 & operator[](int i); + + // Get item accessor + const Array1 & operator[](int i) 
const; + + // String output + std::string asString() const; + + // Get view + void view(int* nrows, int* ncols, long** data) const; + +private: + // Members + bool _ownData; + int _nrows; + int _ncols; + long * _buffer; + Array1 * _rows; + + // Methods + void allocateMemory(); + void allocateRows(); + void deallocateMemory(); +}; + +#endif diff --git a/doc/swig/test/Farray.cxx b/doc/swig/test/Farray.cxx new file mode 100644 index 000000000..3983c333b --- /dev/null +++ b/doc/swig/test/Farray.cxx @@ -0,0 +1,122 @@ +#include "Farray.h" +#include + +// Size constructor +Farray::Farray(int nrows, int ncols) : + _nrows(nrows), _ncols(ncols), _buffer(0) +{ + allocateMemory(); +} + +// Copy constructor +Farray::Farray(const Farray & source) : + _nrows(source._nrows), _ncols(source._ncols) +{ + allocateMemory(); + *this = source; +} + +// Destructor +Farray::~Farray() +{ + delete [] _buffer; +} + +// Assignment operator +Farray & Farray::operator=(const Farray & source) +{ + int nrows = _nrows < source._nrows ? _nrows : source._nrows; + int ncols = _ncols < source._ncols ? _ncols : source._ncols; + for (int i=0; i < nrows; ++i) + { + for (int j=0; j < ncols; ++j) + { + (*this)(i,j) = source(i,j); + } + } + return *this; +} + +// Equals operator +bool Farray::operator==(const Farray & other) const +{ + if (_nrows != other._nrows) return false; + if (_ncols != other._ncols) return false; + for (int i=0; i < _nrows; ++i) + { + for (int j=0; j < _ncols; ++j) + { + if ((*this)(i,j) != other(i,j)) return false; + } + } + return true; +} + +// Length accessors +int Farray::nrows() const +{ + return _nrows; +} + +int Farray::ncols() const +{ + return _ncols; +} + +// Set item accessor +long & Farray::operator()(int i, int j) +{ + if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); + if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); + return _buffer[offset(i,j)]; +} + +// Get item accessor +const long & Farray::operator()(int i, int j) const +{ + if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); + if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); + return _buffer[offset(i,j)]; +} + +// String output +std::string Farray::asString() const +{ + std::stringstream result; + result << "[ "; + for (int i=0; i < _nrows; ++i) + { + if (i > 0) result << " "; + result << "["; + for (int j=0; j < _ncols; ++j) + { + result << " " << (*this)(i,j); + if (j < _ncols-1) result << ","; + } + result << " ]"; + if (i < _nrows-1) result << "," << std::endl; + } + result << " ]" << std::endl; + return result.str(); +} + +// Get view +void Farray::view(int* nrows, int* ncols, long** data) const +{ + *nrows = _nrows; + *ncols = _ncols; + *data = _buffer; +} + +// Private methods +void Farray::allocateMemory() +{ + if (_nrows <= 0) throw std::invalid_argument("Farray nrows <= 0"); + if (_ncols <= 0) throw std::invalid_argument("Farray ncols <= 0"); + _buffer = new long[_nrows*_ncols]; +} + +inline int Farray::offset(int i, int j) const +{ + return i + j * _nrows; +} diff --git a/doc/swig/test/Farray.h b/doc/swig/test/Farray.h new file mode 100644 index 000000000..4199a287c --- /dev/null +++ b/doc/swig/test/Farray.h @@ -0,0 +1,56 @@ +#ifndef FARRAY_H +#define FARRAY_H + +#include +#include + +class Farray +{ +public: + + // Size constructor + Farray(int nrows, int ncols); + + // Copy constructor + Farray(const Farray & source); + + // Destructor + ~Farray(); + + // Assignment operator + Farray & 
operator=(const Farray & source); + + // Equals operator + bool operator==(const Farray & other) const; + + // Length accessors + int nrows() const; + int ncols() const; + + // Set item accessor + long & operator()(int i, int j); + + // Get item accessor + const long & operator()(int i, int j) const; + + // String output + std::string asString() const; + + // Get view + void view(int* nrows, int* ncols, long** data) const; + +private: + // Members + int _nrows; + int _ncols; + long * _buffer; + + // Default constructor: not implemented + Farray(); + + // Methods + void allocateMemory(); + int offset(int i, int j) const; +}; + +#endif diff --git a/doc/swig/test/Farray.i b/doc/swig/test/Farray.i new file mode 100644 index 000000000..25f6cd025 --- /dev/null +++ b/doc/swig/test/Farray.i @@ -0,0 +1,73 @@ +// -*- c++ -*- + +%module Farray + +%{ +#define SWIG_FILE_WITH_INIT +#include "Farray.h" +%} + +// Get the NumPy typemaps +%include "../numpy.i" + + // Get the STL typemaps +%include "stl.i" + +// Handle standard exceptions +%include "exception.i" +%exception +{ + try + { + $action + } + catch (const std::invalid_argument& e) + { + SWIG_exception(SWIG_ValueError, e.what()); + } + catch (const std::out_of_range& e) + { + SWIG_exception(SWIG_IndexError, e.what()); + } +} +%init %{ + import_array(); +%} + +// Global ignores +%ignore *::operator=; +%ignore *::operator(); + +// Apply the 2D NumPy typemaps +%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_FARRAY2) + {(int* nrows, int* ncols, long** data )}; + +// Farray support +%include "Farray.h" +%extend Farray +{ + PyObject * __setitem__(PyObject* index, long v) + { + int i, j; + if (!PyArg_ParseTuple(index, "ii:Farray___setitem__",&i,&j)) return NULL; + self->operator()(i,j) = v; + return Py_BuildValue(""); + } + + PyObject * __getitem__(PyObject * index) + { + int i, j; + if (!PyArg_ParseTuple(index, "ii:Farray___getitem__",&i,&j)) return NULL; + return SWIG_From_long(self->operator()(i,j)); + } + + int __len__() + { + return self->nrows() * self->ncols(); + } + + std::string __str__() + { + return self->asString(); + } +} diff --git a/doc/swig/test/Fortran.cxx b/doc/swig/test/Fortran.cxx new file mode 100644 index 000000000..475d21ddc --- /dev/null +++ b/doc/swig/test/Fortran.cxx @@ -0,0 +1,24 @@ +#include +#include +#include +#include "Fortran.h" + +#define TEST_FUNCS(TYPE, SNAME) \ +\ +TYPE SNAME ## SecondElement(TYPE * matrix, int rows, int cols) { \ + TYPE result = matrix[1]; \ + return result; \ +} \ + +TEST_FUNCS(signed char , schar ) +TEST_FUNCS(unsigned char , uchar ) +TEST_FUNCS(short , short ) +TEST_FUNCS(unsigned short , ushort ) +TEST_FUNCS(int , int ) +TEST_FUNCS(unsigned int , uint ) +TEST_FUNCS(long , long ) +TEST_FUNCS(unsigned long , ulong ) +TEST_FUNCS(long long , longLong ) +TEST_FUNCS(unsigned long long, ulongLong) +TEST_FUNCS(float , float ) +TEST_FUNCS(double , double ) diff --git a/doc/swig/test/Fortran.h b/doc/swig/test/Fortran.h new file mode 100644 index 000000000..c243bb50f --- /dev/null +++ b/doc/swig/test/Fortran.h @@ -0,0 +1,21 @@ +#ifndef FORTRAN_H +#define FORTRAN_H + +#define TEST_FUNC_PROTOS(TYPE, SNAME) \ +\ +TYPE SNAME ## SecondElement( TYPE * matrix, int rows, int cols); \ + +TEST_FUNC_PROTOS(signed char , schar ) +TEST_FUNC_PROTOS(unsigned char , uchar ) +TEST_FUNC_PROTOS(short , short ) +TEST_FUNC_PROTOS(unsigned short , ushort ) +TEST_FUNC_PROTOS(int , int ) +TEST_FUNC_PROTOS(unsigned int , uint ) +TEST_FUNC_PROTOS(long , long ) +TEST_FUNC_PROTOS(unsigned long , ulong ) +TEST_FUNC_PROTOS(long long , 
longLong ) +TEST_FUNC_PROTOS(unsigned long long, ulongLong) +TEST_FUNC_PROTOS(float , float ) +TEST_FUNC_PROTOS(double , double ) + +#endif diff --git a/doc/swig/test/Fortran.i b/doc/swig/test/Fortran.i new file mode 100644 index 000000000..131790dd6 --- /dev/null +++ b/doc/swig/test/Fortran.i @@ -0,0 +1,36 @@ +// -*- c++ -*- +%module Fortran + +%{ +#define SWIG_FILE_WITH_INIT +#include "Fortran.h" +%} + +// Get the NumPy typemaps +%include "../numpy.i" + +%init %{ + import_array(); +%} + +%define %apply_numpy_typemaps(TYPE) + +%apply (TYPE* IN_FARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; + +%enddef /* %apply_numpy_typemaps() macro */ + +%apply_numpy_typemaps(signed char ) +%apply_numpy_typemaps(unsigned char ) +%apply_numpy_typemaps(short ) +%apply_numpy_typemaps(unsigned short ) +%apply_numpy_typemaps(int ) +%apply_numpy_typemaps(unsigned int ) +%apply_numpy_typemaps(long ) +%apply_numpy_typemaps(unsigned long ) +%apply_numpy_typemaps(long long ) +%apply_numpy_typemaps(unsigned long long) +%apply_numpy_typemaps(float ) +%apply_numpy_typemaps(double ) + +// Include the header file to be wrapped +%include "Fortran.h" diff --git a/doc/swig/test/Makefile b/doc/swig/test/Makefile new file mode 100644 index 000000000..5360b1ced --- /dev/null +++ b/doc/swig/test/Makefile @@ -0,0 +1,34 @@ +# SWIG +INTERFACES = Array.i Farray.i Vector.i Matrix.i Tensor.i Fortran.i +WRAPPERS = $(INTERFACES:.i=_wrap.cxx) +PROXIES = $(INTERFACES:.i=.py ) + +# Default target: build the tests +.PHONY : all +all: $(WRAPPERS) Array1.cxx Array1.h Farray.cxx Farray.h Vector.cxx Vector.h \ + Matrix.cxx Matrix.h Tensor.cxx Tensor.h Fortran.h Fortran.cxx + ./setup.py build_ext -i + +# Test target: run the tests +.PHONY : test +test: all + python testVector.py + python testMatrix.py + python testTensor.py + python testArray.py + python testFarray.py + python testFortran.py + +# Rule: %.i -> %_wrap.cxx +%_wrap.cxx: %.i %.h ../numpy.i + swig -c++ -python $< +%_wrap.cxx: %.i %1.h %2.h ../numpy.i + swig -c++ -python $< + +# Clean target +.PHONY : clean +clean: + $(RM) -r build + $(RM) *.so + $(RM) $(WRAPPERS) + $(RM) $(PROXIES) diff --git a/doc/swig/test/Matrix.cxx b/doc/swig/test/Matrix.cxx new file mode 100644 index 000000000..b953d7017 --- /dev/null +++ b/doc/swig/test/Matrix.cxx @@ -0,0 +1,112 @@ +#include +#include +#include +#include "Matrix.h" + +// The following macro defines a family of functions that work with 2D +// arrays with the forms +// +// TYPE SNAMEDet( TYPE matrix[2][2]); +// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); +// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); +// void SNAMEScale( TYPE matrix[3][3]); +// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); +// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil); +// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); +// +// for any specified type TYPE (for example: short, unsigned int, long +// long, etc.) with given short name SNAME (for example: short, uint, +// longLong, etc.). The macro is then expanded for the given +// TYPE/SNAME pairs. 
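A usage sketch of the Fortran wrapping above, as seen from Python: the IN_FARRAY2 typemap applied in Fortran.i converts the input to Fortran (column-major) order before the C function sees the flat buffer, so matrix[1] is the first element of the second row rather than the second element of the first row. Assuming the module builds as Fortran (per %module Fortran)::

  import numpy as np
  import Fortran                        # built from Fortran.i above

  m = np.array([[1, 2],
                [3, 4]], dtype='intc')  # 'intc' is C int
  # The column-major copy of m has the flat layout [1, 3, 2, 4],
  # so the element at index 1 is 3.
  assert Fortran.intSecondElement(m) == 3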
The resulting functions are for testing numpy +// interfaces, respectively, for: +// +// * 2D input arrays, hard-coded length +// * 2D input arrays +// * 2D input arrays, data last +// * 2D in-place arrays, hard-coded lengths +// * 2D in-place arrays +// * 2D in-place arrays, data last +// * 2D argout arrays, hard-coded length +// +#define TEST_FUNCS(TYPE, SNAME) \ +\ +TYPE SNAME ## Det(TYPE matrix[2][2]) { \ + return matrix[0][0]*matrix[1][1] - matrix[0][1]*matrix[1][0]; \ +} \ +\ +TYPE SNAME ## Max(TYPE * matrix, int rows, int cols) { \ + int i, j, index; \ + TYPE result = matrix[0]; \ + for (j=0; j<cols; ++j) { \ + for (i=0; i<rows; ++i) { \ + index = j*rows + i; \ + if (matrix[index] > result) result = matrix[index]; \ + } \ + } \ + return result; \ +} \ +\ +TYPE SNAME ## Min(int rows, int cols, TYPE * matrix) { \ + int i, j, index; \ + TYPE result = matrix[0]; \ + for (j=0; j<cols; ++j) { \ + for (i=0; i<rows; ++i) { \ + index = j*rows + i; \ + if (matrix[index] < result) result = matrix[index]; \ + } \ + } \ + return result; \ +} \ +\ +void SNAME ## Scale(TYPE array[3][3], TYPE val) { \ + for (int i=0; i<3; ++i) \ + for (int j=0; j<3; ++j) \ + array[i][j] *= val; \ +} \ +\ +void SNAME ## Floor(TYPE * array, int rows, int cols, TYPE floor) { \ + int i, j, index; \ + for (j=0; j<cols; ++j) { \ + for (i=0; i<rows; ++i) { \ + index = j*rows + i; \ + if (array[index] < floor) array[index] = floor; \ + } \ + } \ +} \ +\ +void SNAME ## Ceil(int rows, int cols, TYPE * array, TYPE ceil) { \ + int i, j, index; \ + for (j=0; j<cols; ++j) { \ + for (i=0; i<rows; ++i) { \ + index = j*rows + i; \ + if (array[index] > ceil) array[index] = ceil; \ + } \ + } \ +} \ +\ +void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]) { \ + for (int i=0; i<3; ++i) { \ + for (int j=0; j<3; ++j) { \ + if (i >= j) { \ + lower[i][j] = matrix[i][j]; \ + upper[i][j] = 0; \ + } else { \ + lower[i][j] = 0; \ + upper[i][j] = matrix[i][j]; \ + } \ + } \ + } \ +} + +TEST_FUNCS(signed char , schar ) +TEST_FUNCS(unsigned char , uchar ) +TEST_FUNCS(short , short ) +TEST_FUNCS(unsigned short , ushort ) +TEST_FUNCS(int , int ) +TEST_FUNCS(unsigned int , uint ) +TEST_FUNCS(long , long ) +TEST_FUNCS(unsigned long , ulong ) +TEST_FUNCS(long long , longLong ) +TEST_FUNCS(unsigned long long, ulongLong) +TEST_FUNCS(float , float ) +TEST_FUNCS(double , double ) diff --git a/doc/swig/test/Matrix.h b/doc/swig/test/Matrix.h new file mode 100644 index 000000000..f37836cc4 --- /dev/null +++ b/doc/swig/test/Matrix.h @@ -0,0 +1,52 @@ +#ifndef MATRIX_H +#define MATRIX_H + +// The following macro defines the prototypes for a family of +// functions that work with 2D arrays with the forms +// +// TYPE SNAMEDet( TYPE matrix[2][2]); +// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); +// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); +// void SNAMEScale( TYPE array[3][3]); +// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); +// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil ); +// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); +// +// for any specified type TYPE (for example: short, unsigned int, long +// long, etc.) with given short name SNAME (for example: short, uint, +// longLong, etc.). The macro is then expanded for the given +// TYPE/SNAME pairs.
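A usage sketch of how these generated functions look from Python once the typemaps in Matrix.i (below) are applied; the function names follow the TYPE/SNAME expansion, and the dtype must be float64 to match C double::

  import numpy as np
  import Matrix                                  # built from Matrix.i below

  m = np.array([[1.0, 2.0],
                [3.0, 4.0]])                     # float64 <-> C double
  assert Matrix.doubleDet(m) == -2.0             # 2D input array, hard-coded 2x2
  assert Matrix.doubleMax(m) == 4.0              # 2D input array, any shape

  lower, upper = Matrix.doubleLUSplit(np.ones((3, 3)))  # two 3x3 argout arrays
  assert lower[2, 0] == 1.0 and upper[2, 0] == 0.0      # i >= j goes into 'lower'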
The resulting functions are for testing numpy +// interfaces, respectively, for: +// +// * 2D input arrays, hard-coded lengths +// * 2D input arrays +// * 2D input arrays, data last +// * 2D in-place arrays, hard-coded lengths +// * 2D in-place arrays +// * 2D in-place arrays, data last +// * 2D argout arrays, hard-coded length +// +#define TEST_FUNC_PROTOS(TYPE, SNAME) \ +\ +TYPE SNAME ## Det( TYPE matrix[2][2]); \ +TYPE SNAME ## Max( TYPE * matrix, int rows, int cols); \ +TYPE SNAME ## Min( int rows, int cols, TYPE * matrix); \ +void SNAME ## Scale( TYPE array[3][3], TYPE val); \ +void SNAME ## Floor( TYPE * array, int rows, int cols, TYPE floor); \ +void SNAME ## Ceil( int rows, int cols, TYPE * array, TYPE ceil ); \ +void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]); + +TEST_FUNC_PROTOS(signed char , schar ) +TEST_FUNC_PROTOS(unsigned char , uchar ) +TEST_FUNC_PROTOS(short , short ) +TEST_FUNC_PROTOS(unsigned short , ushort ) +TEST_FUNC_PROTOS(int , int ) +TEST_FUNC_PROTOS(unsigned int , uint ) +TEST_FUNC_PROTOS(long , long ) +TEST_FUNC_PROTOS(unsigned long , ulong ) +TEST_FUNC_PROTOS(long long , longLong ) +TEST_FUNC_PROTOS(unsigned long long, ulongLong) +TEST_FUNC_PROTOS(float , float ) +TEST_FUNC_PROTOS(double , double ) + +#endif diff --git a/doc/swig/test/Matrix.i b/doc/swig/test/Matrix.i new file mode 100644 index 000000000..e721397a0 --- /dev/null +++ b/doc/swig/test/Matrix.i @@ -0,0 +1,45 @@ +// -*- c++ -*- +%module Matrix + +%{ +#define SWIG_FILE_WITH_INIT +#include "Matrix.h" +%} + +// Get the NumPy typemaps +%include "../numpy.i" + +%init %{ + import_array(); +%} + +%define %apply_numpy_typemaps(TYPE) + +%apply (TYPE IN_ARRAY2[ANY][ANY]) {(TYPE matrix[ANY][ANY])}; +%apply (TYPE* IN_ARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; +%apply (int DIM1, int DIM2, TYPE* IN_ARRAY2) {(int rows, int cols, TYPE* matrix)}; + +%apply (TYPE INPLACE_ARRAY2[ANY][ANY]) {(TYPE array[3][3])}; +%apply (TYPE* INPLACE_ARRAY2, int DIM1, int DIM2) {(TYPE* array, int rows, int cols)}; +%apply (int DIM1, int DIM2, TYPE* INPLACE_ARRAY2) {(int rows, int cols, TYPE* array)}; + +%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE lower[3][3])}; +%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE upper[3][3])}; + +%enddef /* %apply_numpy_typemaps() macro */ + +%apply_numpy_typemaps(signed char ) +%apply_numpy_typemaps(unsigned char ) +%apply_numpy_typemaps(short ) +%apply_numpy_typemaps(unsigned short ) +%apply_numpy_typemaps(int ) +%apply_numpy_typemaps(unsigned int ) +%apply_numpy_typemaps(long ) +%apply_numpy_typemaps(unsigned long ) +%apply_numpy_typemaps(long long ) +%apply_numpy_typemaps(unsigned long long) +%apply_numpy_typemaps(float ) +%apply_numpy_typemaps(double ) + +// Include the header file to be wrapped +%include "Matrix.h" diff --git a/doc/swig/test/Tensor.cxx b/doc/swig/test/Tensor.cxx new file mode 100644 index 000000000..dce595291 --- /dev/null +++ b/doc/swig/test/Tensor.cxx @@ -0,0 +1,131 @@ +#include +#include +#include +#include "Tensor.h" + +// The following macro defines a family of functions that work with 3D +// arrays with the forms +// +// TYPE SNAMENorm( TYPE tensor[2][2][2]); +// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); +// TYPE SNAMEMin( int rows, int cols, int num, TYPE * tensor); +// void SNAMEScale( TYPE tensor[3][3][3]); +// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); +// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil); +// void SNAMELUSplit(TYPE in[2][2][2], TYPE 
lower[2][2][2], TYPE upper[2][2][2]);
+//
+// for any specified type TYPE (for example: short, unsigned int, long
+// long, etc.) with given short name SNAME (for example: short, uint,
+// longLong, etc.).  The macro is then expanded for the given
+// TYPE/SNAME pairs.  The resulting functions are for testing numpy
+// interfaces, respectively, for:
+//
+//  * 3D input arrays, hard-coded length
+//  * 3D input arrays
+//  * 3D input arrays, data last
+//  * 3D in-place arrays, hard-coded lengths
+//  * 3D in-place arrays
+//  * 3D in-place arrays, data last
+//  * 3D argout arrays, hard-coded length
+//
+#define TEST_FUNCS(TYPE, SNAME) \
+\
+TYPE SNAME ## Norm(TYPE tensor[2][2][2]) { \
+  double result = 0; \
+  for (int k=0; k<2; ++k) \
+    for (int j=0; j<2; ++j) \
+      for (int i=0; i<2; ++i) \
+        result += tensor[i][j][k] * tensor[i][j][k]; \
+  return (TYPE)sqrt(result/8); \
+} \
+\
+TYPE SNAME ## Max(TYPE * tensor, int rows, int cols, int num) { \
+  int i, j, k, index; \
+  TYPE result = tensor[0]; \
+  for (k=0; k<num; ++k) { \
+    for (j=0; j<cols; ++j) { \
+      for (i=0; i<rows; ++i) { \
+        index = k*rows*cols + j*rows + i; \
+        if (tensor[index] > result) result = tensor[index]; \
+      } \
+    } \
+  } \
+  return result; \
+} \
+\
+TYPE SNAME ## Min(int rows, int cols, int num, TYPE * tensor) { \
+  int i, j, k, index; \
+  TYPE result = tensor[0]; \
+  for (k=0; k<num; ++k) { \
+    for (j=0; j<cols; ++j) { \
+      for (i=0; i<rows; ++i) { \
+        index = k*rows*cols + j*rows + i; \
+        if (tensor[index] < result) result = tensor[index]; \
+      } \
+    } \
+  } \
+  return result; \
+} \
+\
+void SNAME ## Scale(TYPE array[3][3][3], TYPE val) { \
+  for (int i=0; i<3; ++i) \
+    for (int j=0; j<3; ++j) \
+      for (int k=0; k<3; ++k) \
+        array[i][j][k] *= val; \
+} \
+\
+void SNAME ## Floor(TYPE * array, int rows, int cols, int num, TYPE floor) { \
+  int i, j, k, index; \
+  for (k=0; k<num; ++k) { \
+    for (j=0; j<cols; ++j) { \
+      for (i=0; i<rows; ++i) { \
+        index = k*rows*cols + j*rows + i; \
+        if (array[index] < floor) array[index] = floor; \
+      } \
+    } \
+  } \
+} \
+\
+void SNAME ## Ceil(int rows, int cols, int num, TYPE * array, TYPE ceil) { \
+  int i, j, k, index; \
+  for (k=0; k<num; ++k) { \
+    for (j=0; j<cols; ++j) { \
+      for (i=0; i<rows; ++i) { \
+        index = k*rows*cols + j*rows + i; \
+        if (array[index] > ceil) array[index] = ceil; \
+      } \
+    } \
+  } \
+} \
+\
+void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], \
+                      TYPE upper[2][2][2]) { \
+  int sum; \
+  for (int k=0; k<2; ++k) { \
+    for (int j=0; j<2; ++j) { \
+      for (int i=0; i<2; ++i) { \
+        sum = i + j + k; \
+        if (sum < 2) { \
+          lower[i][j][k] = tensor[i][j][k]; \
+          upper[i][j][k] = 0; \
+        } else { \
+          upper[i][j][k] = tensor[i][j][k]; \
+          lower[i][j][k] = 0; \
+        } \
+      } \
+    } \
+  } \
+}
+
+TEST_FUNCS(signed char       , schar    )
+TEST_FUNCS(unsigned char     , uchar    )
+TEST_FUNCS(short             , short    )
+TEST_FUNCS(unsigned short    , ushort   )
+TEST_FUNCS(int               , int      )
+TEST_FUNCS(unsigned int      , uint     )
+TEST_FUNCS(long              , long     )
+TEST_FUNCS(unsigned long     , ulong    )
+TEST_FUNCS(long long         , longLong )
+TEST_FUNCS(unsigned long long, ulongLong)
+TEST_FUNCS(float             , float    )
+TEST_FUNCS(double            , double   )
diff --git a/doc/swig/test/Tensor.h b/doc/swig/test/Tensor.h
new file mode 100644
index 000000000..d60eb2d2e
--- /dev/null
+++ b/doc/swig/test/Tensor.h
@@ -0,0 +1,52 @@
+#ifndef TENSOR_H
+#define TENSOR_H
+
+// The following macro defines the prototypes for a family of
+// functions that work with 3D arrays with the forms
+//
+//     TYPE SNAMENorm(   TYPE tensor[2][2][2]);
+//     TYPE SNAMEMax(    TYPE * tensor, int rows, int cols, int num);
+//     TYPE SNAMEMin(    int rows, int cols, int num, TYPE * tensor);
+//     void SNAMEScale(  TYPE array[3][3][3]);
+//     void SNAMEFloor(  TYPE * array, int rows, int cols, int num, TYPE floor);
+//     void SNAMECeil(   int rows, int cols, int num, TYPE * array, TYPE ceil );
+//     void SNAMELUSplit(TYPE in[3][3][3], TYPE lower[3][3][3], TYPE upper[3][3][3]);
+//
+// for any specified type TYPE (for example: short, unsigned int, long
+// long, etc.) with given short name SNAME (for example: short, uint,
+// longLong, etc.).  The macro is then expanded for the given
+// TYPE/SNAME pairs.
The resulting functions are for testing numpy +// interfaces, respectively, for: +// +// * 3D input arrays, hard-coded lengths +// * 3D input arrays +// * 3D input arrays, data last +// * 3D in-place arrays, hard-coded lengths +// * 3D in-place arrays +// * 3D in-place arrays, data last +// * 3D argout arrays, hard-coded length +// +#define TEST_FUNC_PROTOS(TYPE, SNAME) \ +\ +TYPE SNAME ## Norm( TYPE tensor[2][2][2]); \ +TYPE SNAME ## Max( TYPE * tensor, int rows, int cols, int num); \ +TYPE SNAME ## Min( int rows, int cols, int num, TYPE * tensor); \ +void SNAME ## Scale( TYPE array[3][3][3], TYPE val); \ +void SNAME ## Floor( TYPE * array, int rows, int cols, int num, TYPE floor); \ +void SNAME ## Ceil( int rows, int cols, int num, TYPE * array, TYPE ceil ); \ +void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); + +TEST_FUNC_PROTOS(signed char , schar ) +TEST_FUNC_PROTOS(unsigned char , uchar ) +TEST_FUNC_PROTOS(short , short ) +TEST_FUNC_PROTOS(unsigned short , ushort ) +TEST_FUNC_PROTOS(int , int ) +TEST_FUNC_PROTOS(unsigned int , uint ) +TEST_FUNC_PROTOS(long , long ) +TEST_FUNC_PROTOS(unsigned long , ulong ) +TEST_FUNC_PROTOS(long long , longLong ) +TEST_FUNC_PROTOS(unsigned long long, ulongLong) +TEST_FUNC_PROTOS(float , float ) +TEST_FUNC_PROTOS(double , double ) + +#endif diff --git a/doc/swig/test/Tensor.i b/doc/swig/test/Tensor.i new file mode 100644 index 000000000..a1198dc9e --- /dev/null +++ b/doc/swig/test/Tensor.i @@ -0,0 +1,49 @@ +// -*- c++ -*- +%module Tensor + +%{ +#define SWIG_FILE_WITH_INIT +#include "Tensor.h" +%} + +// Get the NumPy typemaps +%include "../numpy.i" + +%init %{ + import_array(); +%} + +%define %apply_numpy_typemaps(TYPE) + +%apply (TYPE IN_ARRAY3[ANY][ANY][ANY]) {(TYPE tensor[ANY][ANY][ANY])}; +%apply (TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3) + {(TYPE* tensor, int rows, int cols, int num)}; +%apply (int DIM1, int DIM2, int DIM3, TYPE* IN_ARRAY3) + {(int rows, int cols, int num, TYPE* tensor)}; + +%apply (TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) {(TYPE array[3][3][3])}; +%apply (TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) + {(TYPE* array, int rows, int cols, int num)}; +%apply (int DIM1, int DIM2, int DIM3, TYPE* INPLACE_ARRAY3) + {(int rows, int cols, int num, TYPE* array)}; + +%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE lower[2][2][2])}; +%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE upper[2][2][2])}; + +%enddef /* %apply_numpy_typemaps() macro */ + +%apply_numpy_typemaps(signed char ) +%apply_numpy_typemaps(unsigned char ) +%apply_numpy_typemaps(short ) +%apply_numpy_typemaps(unsigned short ) +%apply_numpy_typemaps(int ) +%apply_numpy_typemaps(unsigned int ) +%apply_numpy_typemaps(long ) +%apply_numpy_typemaps(unsigned long ) +%apply_numpy_typemaps(long long ) +%apply_numpy_typemaps(unsigned long long) +%apply_numpy_typemaps(float ) +%apply_numpy_typemaps(double ) + +// Include the header file to be wrapped +%include "Tensor.h" diff --git a/doc/swig/test/Vector.cxx b/doc/swig/test/Vector.cxx new file mode 100644 index 000000000..2c90404da --- /dev/null +++ b/doc/swig/test/Vector.cxx @@ -0,0 +1,100 @@ +#include +#include +#include +#include "Vector.h" + +// The following macro defines a family of functions that work with 1D +// arrays with the forms +// +// TYPE SNAMELength( TYPE vector[3]); +// TYPE SNAMEProd( TYPE * series, int size); +// TYPE SNAMESum( int size, TYPE * series); +// void SNAMEReverse(TYPE array[3]); +// void SNAMEOnes( TYPE * array, int size); +// void SNAMEZeros( int size, TYPE 
* array); +// void SNAMEEOSplit(TYPE vector[3], TYPE even[3], odd[3]); +// void SNAMETwos( TYPE * twoVec, int size); +// void SNAMEThrees( int size, TYPE * threeVec); +// +// for any specified type TYPE (for example: short, unsigned int, long +// long, etc.) with given short name SNAME (for example: short, uint, +// longLong, etc.). The macro is then expanded for the given +// TYPE/SNAME pairs. The resulting functions are for testing numpy +// interfaces, respectively, for: +// +// * 1D input arrays, hard-coded length +// * 1D input arrays +// * 1D input arrays, data last +// * 1D in-place arrays, hard-coded length +// * 1D in-place arrays +// * 1D in-place arrays, data last +// * 1D argout arrays, hard-coded length +// * 1D argout arrays +// * 1D argout arrays, data last +// +#define TEST_FUNCS(TYPE, SNAME) \ +\ +TYPE SNAME ## Length(TYPE vector[3]) { \ + double result = 0; \ + for (int i=0; i<3; ++i) result += vector[i]*vector[i]; \ + return (TYPE)sqrt(result); \ +} \ +\ +TYPE SNAME ## Prod(TYPE * series, int size) { \ + TYPE result = 1; \ + for (int i=0; i>sys.stderr, self.typeStr, "... ", + second = Fortran.__dict__[self.typeStr + "SecondElement"] + matrix = np.arange(9).reshape(3, 3).astype(self.typeCode) + self.assertEquals(second(matrix), 3) + + def testSecondElementFortran(self): + "Test luSplit function with a Fortran-array" + print >>sys.stderr, self.typeStr, "... ", + second = Fortran.__dict__[self.typeStr + "SecondElement"] + matrix = np.asfortranarray(np.arange(9).reshape(3, 3), + self.typeCode) + self.assertEquals(second(matrix), 3) + + def testSecondElementObject(self): + "Test luSplit function with a Fortran-array" + print >>sys.stderr, self.typeStr, "... ", + second = Fortran.__dict__[self.typeStr + "SecondElement"] + matrix = np.asfortranarray([[0,1,2],[3,4,5],[6,7,8]], self.typeCode) + self.assertEquals(second(matrix), 3) + +###################################################################### + +class scharTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "schar" + self.typeCode = "b" + +###################################################################### + +class ucharTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "uchar" + self.typeCode = "B" + +###################################################################### + +class shortTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "short" + self.typeCode = "h" + +###################################################################### + +class ushortTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "ushort" + self.typeCode = "H" + +###################################################################### + +class intTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "int" + self.typeCode = "i" + +###################################################################### + +class uintTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "uint" + self.typeCode = "I" + +###################################################################### + +class longTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + 
FortranTestCase.__init__(self, methodName) + self.typeStr = "long" + self.typeCode = "l" + +###################################################################### + +class ulongTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "ulong" + self.typeCode = "L" + +###################################################################### + +class longLongTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "longLong" + self.typeCode = "q" + +###################################################################### + +class ulongLongTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "ulongLong" + self.typeCode = "Q" + +###################################################################### + +class floatTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "float" + self.typeCode = "f" + +###################################################################### + +class doubleTestCase(FortranTestCase): + def __init__(self, methodName="runTest"): + FortranTestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + +###################################################################### + +if __name__ == "__main__": + + # Build the test suite + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite( scharTestCase)) + suite.addTest(unittest.makeSuite( ucharTestCase)) + suite.addTest(unittest.makeSuite( shortTestCase)) + suite.addTest(unittest.makeSuite( ushortTestCase)) + suite.addTest(unittest.makeSuite( intTestCase)) + suite.addTest(unittest.makeSuite( uintTestCase)) + suite.addTest(unittest.makeSuite( longTestCase)) + suite.addTest(unittest.makeSuite( ulongTestCase)) + suite.addTest(unittest.makeSuite( longLongTestCase)) + suite.addTest(unittest.makeSuite(ulongLongTestCase)) + suite.addTest(unittest.makeSuite( floatTestCase)) + suite.addTest(unittest.makeSuite( doubleTestCase)) + + # Execute the test suite + print "Testing 2D Functions of Module Matrix" + print "NumPy version", np.__version__ + print + result = unittest.TextTestRunner(verbosity=2).run(suite) + sys.exit(len(result.errors) + len(result.failures)) diff --git a/doc/swig/test/testMatrix.py b/doc/swig/test/testMatrix.py new file mode 100755 index 000000000..12061702d --- /dev/null +++ b/doc/swig/test/testMatrix.py @@ -0,0 +1,361 @@ +#! /usr/bin/env python + +# System imports +from distutils.util import get_platform +import os +import sys +import unittest + +# Import NumPy +import numpy as np +major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] +if major == 0: BadListError = TypeError +else: BadListError = ValueError + +import Matrix + +###################################################################### + +class MatrixTestCase(unittest.TestCase): + + def __init__(self, methodName="runTests"): + unittest.TestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + + # Test (type IN_ARRAY2[ANY][ANY]) typemap + def testDet(self): + "Test det function" + print >>sys.stderr, self.typeStr, "... ", + det = Matrix.__dict__[self.typeStr + "Det"] + matrix = [[8,7],[6,9]] + self.assertEquals(det(matrix), 30) + + # Test (type IN_ARRAY2[ANY][ANY]) typemap + def testDetBadList(self): + "Test det function with bad list" + print >>sys.stderr, self.typeStr, "... 
", + det = Matrix.__dict__[self.typeStr + "Det"] + matrix = [[8,7], ["e", "pi"]] + self.assertRaises(BadListError, det, matrix) + + # Test (type IN_ARRAY2[ANY][ANY]) typemap + def testDetWrongDim(self): + "Test det function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + det = Matrix.__dict__[self.typeStr + "Det"] + matrix = [8,7] + self.assertRaises(TypeError, det, matrix) + + # Test (type IN_ARRAY2[ANY][ANY]) typemap + def testDetWrongSize(self): + "Test det function with wrong size" + print >>sys.stderr, self.typeStr, "... ", + det = Matrix.__dict__[self.typeStr + "Det"] + matrix = [[8,7,6], [5,4,3], [2,1,0]] + self.assertRaises(TypeError, det, matrix) + + # Test (type IN_ARRAY2[ANY][ANY]) typemap + def testDetNonContainer(self): + "Test det function with non-container" + print >>sys.stderr, self.typeStr, "... ", + det = Matrix.__dict__[self.typeStr + "Det"] + self.assertRaises(TypeError, det, None) + + # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap + def testMax(self): + "Test max function" + print >>sys.stderr, self.typeStr, "... ", + max = Matrix.__dict__[self.typeStr + "Max"] + matrix = [[6,5,4],[3,2,1]] + self.assertEquals(max(matrix), 6) + + # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap + def testMaxBadList(self): + "Test max function with bad list" + print >>sys.stderr, self.typeStr, "... ", + max = Matrix.__dict__[self.typeStr + "Max"] + matrix = [[6,"five",4], ["three", 2, "one"]] + self.assertRaises(BadListError, max, matrix) + + # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap + def testMaxNonContainer(self): + "Test max function with non-container" + print >>sys.stderr, self.typeStr, "... ", + max = Matrix.__dict__[self.typeStr + "Max"] + self.assertRaises(TypeError, max, None) + + # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap + def testMaxWrongDim(self): + "Test max function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + max = Matrix.__dict__[self.typeStr + "Max"] + self.assertRaises(TypeError, max, [0, 1, 2, 3]) + + # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap + def testMin(self): + "Test min function" + print >>sys.stderr, self.typeStr, "... ", + min = Matrix.__dict__[self.typeStr + "Min"] + matrix = [[9,8],[7,6],[5,4]] + self.assertEquals(min(matrix), 4) + + # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap + def testMinBadList(self): + "Test min function with bad list" + print >>sys.stderr, self.typeStr, "... ", + min = Matrix.__dict__[self.typeStr + "Min"] + matrix = [["nine","eight"], ["seven","six"]] + self.assertRaises(BadListError, min, matrix) + + # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap + def testMinWrongDim(self): + "Test min function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + min = Matrix.__dict__[self.typeStr + "Min"] + self.assertRaises(TypeError, min, [1,3,5,7,9]) + + # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap + def testMinNonContainer(self): + "Test min function with non-container" + print >>sys.stderr, self.typeStr, "... ", + min = Matrix.__dict__[self.typeStr + "Min"] + self.assertRaises(TypeError, min, False) + + # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap + def testScale(self): + "Test scale function" + print >>sys.stderr, self.typeStr, "... 
", + scale = Matrix.__dict__[self.typeStr + "Scale"] + matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode) + scale(matrix,4) + self.assertEquals((matrix == [[4,8,12],[8,4,8],[12,8,4]]).all(), True) + + # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap + def testScaleWrongDim(self): + "Test scale function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + scale = Matrix.__dict__[self.typeStr + "Scale"] + matrix = np.array([1,2,2,1],self.typeCode) + self.assertRaises(TypeError, scale, matrix) + + # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap + def testScaleWrongSize(self): + "Test scale function with wrong size" + print >>sys.stderr, self.typeStr, "... ", + scale = Matrix.__dict__[self.typeStr + "Scale"] + matrix = np.array([[1,2],[2,1]],self.typeCode) + self.assertRaises(TypeError, scale, matrix) + + # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap + def testScaleWrongType(self): + "Test scale function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + scale = Matrix.__dict__[self.typeStr + "Scale"] + matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],'c') + self.assertRaises(TypeError, scale, matrix) + + # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap + def testScaleNonArray(self): + "Test scale function with non-array" + print >>sys.stderr, self.typeStr, "... ", + scale = Matrix.__dict__[self.typeStr + "Scale"] + matrix = [[1,2,3],[2,1,2],[3,2,1]] + self.assertRaises(TypeError, scale, matrix) + + # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap + def testFloor(self): + "Test floor function" + print >>sys.stderr, self.typeStr, "... ", + floor = Matrix.__dict__[self.typeStr + "Floor"] + matrix = np.array([[6,7],[8,9]],self.typeCode) + floor(matrix,7) + np.testing.assert_array_equal(matrix, np.array([[7,7],[8,9]])) + + # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap + def testFloorWrongDim(self): + "Test floor function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + floor = Matrix.__dict__[self.typeStr + "Floor"] + matrix = np.array([6,7,8,9],self.typeCode) + self.assertRaises(TypeError, floor, matrix) + + # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap + def testFloorWrongType(self): + "Test floor function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + floor = Matrix.__dict__[self.typeStr + "Floor"] + matrix = np.array([[6,7], [8,9]],'c') + self.assertRaises(TypeError, floor, matrix) + + # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap + def testFloorNonArray(self): + "Test floor function with non-array" + print >>sys.stderr, self.typeStr, "... ", + floor = Matrix.__dict__[self.typeStr + "Floor"] + matrix = [[6,7], [8,9]] + self.assertRaises(TypeError, floor, matrix) + + # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap + def testCeil(self): + "Test ceil function" + print >>sys.stderr, self.typeStr, "... ", + ceil = Matrix.__dict__[self.typeStr + "Ceil"] + matrix = np.array([[1,2],[3,4]],self.typeCode) + ceil(matrix,3) + np.testing.assert_array_equal(matrix, np.array([[1,2],[3,3]])) + + # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap + def testCeilWrongDim(self): + "Test ceil function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + ceil = Matrix.__dict__[self.typeStr + "Ceil"] + matrix = np.array([1,2,3,4],self.typeCode) + self.assertRaises(TypeError, ceil, matrix) + + # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap + def testCeilWrongType(self): + "Test ceil function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... 
", + ceil = Matrix.__dict__[self.typeStr + "Ceil"] + matrix = np.array([[1,2], [3,4]],'c') + self.assertRaises(TypeError, ceil, matrix) + + # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap + def testCeilNonArray(self): + "Test ceil function with non-array" + print >>sys.stderr, self.typeStr, "... ", + ceil = Matrix.__dict__[self.typeStr + "Ceil"] + matrix = [[1,2], [3,4]] + self.assertRaises(TypeError, ceil, matrix) + + # Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap + def testLUSplit(self): + "Test luSplit function" + print >>sys.stderr, self.typeStr, "... ", + luSplit = Matrix.__dict__[self.typeStr + "LUSplit"] + lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]]) + self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True) + self.assertEquals((upper == [[0,2,3],[0,0,6],[0,0,0]]).all(), True) + +###################################################################### + +class scharTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "schar" + self.typeCode = "b" + +###################################################################### + +class ucharTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "uchar" + self.typeCode = "B" + +###################################################################### + +class shortTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "short" + self.typeCode = "h" + +###################################################################### + +class ushortTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "ushort" + self.typeCode = "H" + +###################################################################### + +class intTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "int" + self.typeCode = "i" + +###################################################################### + +class uintTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "uint" + self.typeCode = "I" + +###################################################################### + +class longTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "long" + self.typeCode = "l" + +###################################################################### + +class ulongTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "ulong" + self.typeCode = "L" + +###################################################################### + +class longLongTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "longLong" + self.typeCode = "q" + +###################################################################### + +class ulongLongTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "ulongLong" + self.typeCode = "Q" + +###################################################################### + +class floatTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "float" + 
self.typeCode = "f" + +###################################################################### + +class doubleTestCase(MatrixTestCase): + def __init__(self, methodName="runTest"): + MatrixTestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + +###################################################################### + +if __name__ == "__main__": + + # Build the test suite + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite( scharTestCase)) + suite.addTest(unittest.makeSuite( ucharTestCase)) + suite.addTest(unittest.makeSuite( shortTestCase)) + suite.addTest(unittest.makeSuite( ushortTestCase)) + suite.addTest(unittest.makeSuite( intTestCase)) + suite.addTest(unittest.makeSuite( uintTestCase)) + suite.addTest(unittest.makeSuite( longTestCase)) + suite.addTest(unittest.makeSuite( ulongTestCase)) + suite.addTest(unittest.makeSuite( longLongTestCase)) + suite.addTest(unittest.makeSuite(ulongLongTestCase)) + suite.addTest(unittest.makeSuite( floatTestCase)) + suite.addTest(unittest.makeSuite( doubleTestCase)) + + # Execute the test suite + print "Testing 2D Functions of Module Matrix" + print "NumPy version", np.__version__ + print + result = unittest.TextTestRunner(verbosity=2).run(suite) + sys.exit(len(result.errors) + len(result.failures)) diff --git a/doc/swig/test/testTensor.py b/doc/swig/test/testTensor.py new file mode 100755 index 000000000..3d0ce097e --- /dev/null +++ b/doc/swig/test/testTensor.py @@ -0,0 +1,401 @@ +#! /usr/bin/env python + +# System imports +from distutils.util import get_platform +from math import sqrt +import os +import sys +import unittest + +# Import NumPy +import numpy as np +major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] +if major == 0: BadListError = TypeError +else: BadListError = ValueError + +import Tensor + +###################################################################### + +class TensorTestCase(unittest.TestCase): + + def __init__(self, methodName="runTests"): + unittest.TestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + self.result = sqrt(28.0/8) + + # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap + def testNorm(self): + "Test norm function" + print >>sys.stderr, self.typeStr, "... ", + norm = Tensor.__dict__[self.typeStr + "Norm"] + tensor = [[[0,1], [2,3]], + [[3,2], [1,0]]] + if isinstance(self.result, int): + self.assertEquals(norm(tensor), self.result) + else: + self.assertAlmostEqual(norm(tensor), self.result, 6) + + # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap + def testNormBadList(self): + "Test norm function with bad list" + print >>sys.stderr, self.typeStr, "... ", + norm = Tensor.__dict__[self.typeStr + "Norm"] + tensor = [[[0,"one"],[2,3]], + [[3,"two"],[1,0]]] + self.assertRaises(BadListError, norm, tensor) + + # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap + def testNormWrongDim(self): + "Test norm function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + norm = Tensor.__dict__[self.typeStr + "Norm"] + tensor = [[0,1,2,3], + [3,2,1,0]] + self.assertRaises(TypeError, norm, tensor) + + # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap + def testNormWrongSize(self): + "Test norm function with wrong size" + print >>sys.stderr, self.typeStr, "... 
", + norm = Tensor.__dict__[self.typeStr + "Norm"] + tensor = [[[0,1,0], [2,3,2]], + [[3,2,3], [1,0,1]]] + self.assertRaises(TypeError, norm, tensor) + + # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap + def testNormNonContainer(self): + "Test norm function with non-container" + print >>sys.stderr, self.typeStr, "... ", + norm = Tensor.__dict__[self.typeStr + "Norm"] + self.assertRaises(TypeError, norm, None) + + # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testMax(self): + "Test max function" + print >>sys.stderr, self.typeStr, "... ", + max = Tensor.__dict__[self.typeStr + "Max"] + tensor = [[[1,2], [3,4]], + [[5,6], [7,8]]] + self.assertEquals(max(tensor), 8) + + # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testMaxBadList(self): + "Test max function with bad list" + print >>sys.stderr, self.typeStr, "... ", + max = Tensor.__dict__[self.typeStr + "Max"] + tensor = [[[1,"two"], [3,4]], + [[5,"six"], [7,8]]] + self.assertRaises(BadListError, max, tensor) + + # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testMaxNonContainer(self): + "Test max function with non-container" + print >>sys.stderr, self.typeStr, "... ", + max = Tensor.__dict__[self.typeStr + "Max"] + self.assertRaises(TypeError, max, None) + + # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testMaxWrongDim(self): + "Test max function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + max = Tensor.__dict__[self.typeStr + "Max"] + self.assertRaises(TypeError, max, [0, -1, 2, -3]) + + # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap + def testMin(self): + "Test min function" + print >>sys.stderr, self.typeStr, "... ", + min = Tensor.__dict__[self.typeStr + "Min"] + tensor = [[[9,8], [7,6]], + [[5,4], [3,2]]] + self.assertEquals(min(tensor), 2) + + # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap + def testMinBadList(self): + "Test min function with bad list" + print >>sys.stderr, self.typeStr, "... ", + min = Tensor.__dict__[self.typeStr + "Min"] + tensor = [[["nine",8], [7,6]], + [["five",4], [3,2]]] + self.assertRaises(BadListError, min, tensor) + + # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap + def testMinNonContainer(self): + "Test min function with non-container" + print >>sys.stderr, self.typeStr, "... ", + min = Tensor.__dict__[self.typeStr + "Min"] + self.assertRaises(TypeError, min, True) + + # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap + def testMinWrongDim(self): + "Test min function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + min = Tensor.__dict__[self.typeStr + "Min"] + self.assertRaises(TypeError, min, [[1,3],[5,7]]) + + # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap + def testScale(self): + "Test scale function" + print >>sys.stderr, self.typeStr, "... ", + scale = Tensor.__dict__[self.typeStr + "Scale"] + tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], + [[0,1,0], [1,0,1], [0,1,0]], + [[1,0,1], [0,1,0], [1,0,1]]],self.typeCode) + scale(tensor,4) + self.assertEquals((tensor == [[[4,0,4], [0,4,0], [4,0,4]], + [[0,4,0], [4,0,4], [0,4,0]], + [[4,0,4], [0,4,0], [4,0,4]]]).all(), True) + + # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap + def testScaleWrongType(self): + "Test scale function with wrong type" + print >>sys.stderr, self.typeStr, "... 
", + scale = Tensor.__dict__[self.typeStr + "Scale"] + tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], + [[0,1,0], [1,0,1], [0,1,0]], + [[1,0,1], [0,1,0], [1,0,1]]],'c') + self.assertRaises(TypeError, scale, tensor) + + # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap + def testScaleWrongDim(self): + "Test scale function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + scale = Tensor.__dict__[self.typeStr + "Scale"] + tensor = np.array([[1,0,1], [0,1,0], [1,0,1], + [0,1,0], [1,0,1], [0,1,0]],self.typeCode) + self.assertRaises(TypeError, scale, tensor) + + # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap + def testScaleWrongSize(self): + "Test scale function with wrong size" + print >>sys.stderr, self.typeStr, "... ", + scale = Tensor.__dict__[self.typeStr + "Scale"] + tensor = np.array([[[1,0], [0,1], [1,0]], + [[0,1], [1,0], [0,1]], + [[1,0], [0,1], [1,0]]],self.typeCode) + self.assertRaises(TypeError, scale, tensor) + + # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap + def testScaleNonArray(self): + "Test scale function with non-array" + print >>sys.stderr, self.typeStr, "... ", + scale = Tensor.__dict__[self.typeStr + "Scale"] + self.assertRaises(TypeError, scale, True) + + # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testFloor(self): + "Test floor function" + print >>sys.stderr, self.typeStr, "... ", + floor = Tensor.__dict__[self.typeStr + "Floor"] + tensor = np.array([[[1,2], [3,4]], + [[5,6], [7,8]]],self.typeCode) + floor(tensor,4) + np.testing.assert_array_equal(tensor, np.array([[[4,4], [4,4]], + [[5,6], [7,8]]])) + + # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testFloorWrongType(self): + "Test floor function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + floor = Tensor.__dict__[self.typeStr + "Floor"] + tensor = np.array([[[1,2], [3,4]], + [[5,6], [7,8]]],'c') + self.assertRaises(TypeError, floor, tensor) + + # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testFloorWrongDim(self): + "Test floor function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + floor = Tensor.__dict__[self.typeStr + "Floor"] + tensor = np.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode) + self.assertRaises(TypeError, floor, tensor) + + # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap + def testFloorNonArray(self): + "Test floor function with non-array" + print >>sys.stderr, self.typeStr, "... ", + floor = Tensor.__dict__[self.typeStr + "Floor"] + self.assertRaises(TypeError, floor, object) + + # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap + def testCeil(self): + "Test ceil function" + print >>sys.stderr, self.typeStr, "... ", + ceil = Tensor.__dict__[self.typeStr + "Ceil"] + tensor = np.array([[[9,8], [7,6]], + [[5,4], [3,2]]],self.typeCode) + ceil(tensor,5) + np.testing.assert_array_equal(tensor, np.array([[[5,5], [5,5]], + [[5,4], [3,2]]])) + + # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap + def testCeilWrongType(self): + "Test ceil function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + ceil = Tensor.__dict__[self.typeStr + "Ceil"] + tensor = np.array([[[9,8], [7,6]], + [[5,4], [3,2]]],'c') + self.assertRaises(TypeError, ceil, tensor) + + # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap + def testCeilWrongDim(self): + "Test ceil function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... 
", + ceil = Tensor.__dict__[self.typeStr + "Ceil"] + tensor = np.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode) + self.assertRaises(TypeError, ceil, tensor) + + # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap + def testCeilNonArray(self): + "Test ceil function with non-array" + print >>sys.stderr, self.typeStr, "... ", + ceil = Tensor.__dict__[self.typeStr + "Ceil"] + tensor = [[[9,8], [7,6]], + [[5,4], [3,2]]] + self.assertRaises(TypeError, ceil, tensor) + + # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap + def testLUSplit(self): + "Test luSplit function" + print >>sys.stderr, self.typeStr, "... ", + luSplit = Tensor.__dict__[self.typeStr + "LUSplit"] + lower, upper = luSplit([[[1,1], [1,1]], + [[1,1], [1,1]]]) + self.assertEquals((lower == [[[1,1], [1,0]], + [[1,0], [0,0]]]).all(), True) + self.assertEquals((upper == [[[0,0], [0,1]], + [[0,1], [1,1]]]).all(), True) + +###################################################################### + +class scharTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "schar" + self.typeCode = "b" + self.result = int(self.result) + +###################################################################### + +class ucharTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "uchar" + self.typeCode = "B" + self.result = int(self.result) + +###################################################################### + +class shortTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "short" + self.typeCode = "h" + self.result = int(self.result) + +###################################################################### + +class ushortTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "ushort" + self.typeCode = "H" + self.result = int(self.result) + +###################################################################### + +class intTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "int" + self.typeCode = "i" + self.result = int(self.result) + +###################################################################### + +class uintTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "uint" + self.typeCode = "I" + self.result = int(self.result) + +###################################################################### + +class longTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "long" + self.typeCode = "l" + self.result = int(self.result) + +###################################################################### + +class ulongTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "ulong" + self.typeCode = "L" + self.result = int(self.result) + +###################################################################### + +class longLongTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "longLong" + self.typeCode = "q" + self.result = int(self.result) + +###################################################################### + +class 
ulongLongTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "ulongLong" + self.typeCode = "Q" + self.result = int(self.result) + +###################################################################### + +class floatTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "float" + self.typeCode = "f" + +###################################################################### + +class doubleTestCase(TensorTestCase): + def __init__(self, methodName="runTest"): + TensorTestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + +###################################################################### + +if __name__ == "__main__": + + # Build the test suite + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite( scharTestCase)) + suite.addTest(unittest.makeSuite( ucharTestCase)) + suite.addTest(unittest.makeSuite( shortTestCase)) + suite.addTest(unittest.makeSuite( ushortTestCase)) + suite.addTest(unittest.makeSuite( intTestCase)) + suite.addTest(unittest.makeSuite( uintTestCase)) + suite.addTest(unittest.makeSuite( longTestCase)) + suite.addTest(unittest.makeSuite( ulongTestCase)) + suite.addTest(unittest.makeSuite( longLongTestCase)) + suite.addTest(unittest.makeSuite(ulongLongTestCase)) + suite.addTest(unittest.makeSuite( floatTestCase)) + suite.addTest(unittest.makeSuite( doubleTestCase)) + + # Execute the test suite + print "Testing 3D Functions of Module Tensor" + print "NumPy version", np.__version__ + print + result = unittest.TextTestRunner(verbosity=2).run(suite) + sys.exit(len(result.errors) + len(result.failures)) diff --git a/doc/swig/test/testVector.py b/doc/swig/test/testVector.py new file mode 100755 index 000000000..2ee918389 --- /dev/null +++ b/doc/swig/test/testVector.py @@ -0,0 +1,380 @@ +#! /usr/bin/env python + +# System imports +from distutils.util import get_platform +import os +import sys +import unittest + +# Import NumPy +import numpy as np +major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] +if major == 0: BadListError = TypeError +else: BadListError = ValueError + +import Vector + +###################################################################### + +class VectorTestCase(unittest.TestCase): + + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + + # Test the (type IN_ARRAY1[ANY]) typemap + def testLength(self): + "Test length function" + print >>sys.stderr, self.typeStr, "... ", + length = Vector.__dict__[self.typeStr + "Length"] + self.assertEquals(length([5, 12, 0]), 13) + + # Test the (type IN_ARRAY1[ANY]) typemap + def testLengthBadList(self): + "Test length function with bad list" + print >>sys.stderr, self.typeStr, "... ", + length = Vector.__dict__[self.typeStr + "Length"] + self.assertRaises(BadListError, length, [5, "twelve", 0]) + + # Test the (type IN_ARRAY1[ANY]) typemap + def testLengthWrongSize(self): + "Test length function with wrong size" + print >>sys.stderr, self.typeStr, "... ", + length = Vector.__dict__[self.typeStr + "Length"] + self.assertRaises(TypeError, length, [5, 12]) + + # Test the (type IN_ARRAY1[ANY]) typemap + def testLengthWrongDim(self): + "Test length function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... 
", + length = Vector.__dict__[self.typeStr + "Length"] + self.assertRaises(TypeError, length, [[1,2], [3,4]]) + + # Test the (type IN_ARRAY1[ANY]) typemap + def testLengthNonContainer(self): + "Test length function with non-container" + print >>sys.stderr, self.typeStr, "... ", + length = Vector.__dict__[self.typeStr + "Length"] + self.assertRaises(TypeError, length, None) + + # Test the (type* IN_ARRAY1, int DIM1) typemap + def testProd(self): + "Test prod function" + print >>sys.stderr, self.typeStr, "... ", + prod = Vector.__dict__[self.typeStr + "Prod"] + self.assertEquals(prod([1,2,3,4]), 24) + + # Test the (type* IN_ARRAY1, int DIM1) typemap + def testProdBadList(self): + "Test prod function with bad list" + print >>sys.stderr, self.typeStr, "... ", + prod = Vector.__dict__[self.typeStr + "Prod"] + self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]]) + + # Test the (type* IN_ARRAY1, int DIM1) typemap + def testProdWrongDim(self): + "Test prod function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + prod = Vector.__dict__[self.typeStr + "Prod"] + self.assertRaises(TypeError, prod, [[1,2], [8,9]]) + + # Test the (type* IN_ARRAY1, int DIM1) typemap + def testProdNonContainer(self): + "Test prod function with non-container" + print >>sys.stderr, self.typeStr, "... ", + prod = Vector.__dict__[self.typeStr + "Prod"] + self.assertRaises(TypeError, prod, None) + + # Test the (int DIM1, type* IN_ARRAY1) typemap + def testSum(self): + "Test sum function" + print >>sys.stderr, self.typeStr, "... ", + sum = Vector.__dict__[self.typeStr + "Sum"] + self.assertEquals(sum([5,6,7,8]), 26) + + # Test the (int DIM1, type* IN_ARRAY1) typemap + def testSumBadList(self): + "Test sum function with bad list" + print >>sys.stderr, self.typeStr, "... ", + sum = Vector.__dict__[self.typeStr + "Sum"] + self.assertRaises(BadListError, sum, [3,4, 5, "pi"]) + + # Test the (int DIM1, type* IN_ARRAY1) typemap + def testSumWrongDim(self): + "Test sum function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + sum = Vector.__dict__[self.typeStr + "Sum"] + self.assertRaises(TypeError, sum, [[3,4], [5,6]]) + + # Test the (int DIM1, type* IN_ARRAY1) typemap + def testSumNonContainer(self): + "Test sum function with non-container" + print >>sys.stderr, self.typeStr, "... ", + sum = Vector.__dict__[self.typeStr + "Sum"] + self.assertRaises(TypeError, sum, True) + + # Test the (type INPLACE_ARRAY1[ANY]) typemap + def testReverse(self): + "Test reverse function" + print >>sys.stderr, self.typeStr, "... ", + reverse = Vector.__dict__[self.typeStr + "Reverse"] + vector = np.array([1,2,4],self.typeCode) + reverse(vector) + self.assertEquals((vector == [4,2,1]).all(), True) + + # Test the (type INPLACE_ARRAY1[ANY]) typemap + def testReverseWrongDim(self): + "Test reverse function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + reverse = Vector.__dict__[self.typeStr + "Reverse"] + vector = np.array([[1,2], [3,4]],self.typeCode) + self.assertRaises(TypeError, reverse, vector) + + # Test the (type INPLACE_ARRAY1[ANY]) typemap + def testReverseWrongSize(self): + "Test reverse function with wrong size" + print >>sys.stderr, self.typeStr, "... ", + reverse = Vector.__dict__[self.typeStr + "Reverse"] + vector = np.array([9,8,7,6,5,4],self.typeCode) + self.assertRaises(TypeError, reverse, vector) + + # Test the (type INPLACE_ARRAY1[ANY]) typemap + def testReverseWrongType(self): + "Test reverse function with wrong type" + print >>sys.stderr, self.typeStr, "... 
", + reverse = Vector.__dict__[self.typeStr + "Reverse"] + vector = np.array([1,2,4],'c') + self.assertRaises(TypeError, reverse, vector) + + # Test the (type INPLACE_ARRAY1[ANY]) typemap + def testReverseNonArray(self): + "Test reverse function with non-array" + print >>sys.stderr, self.typeStr, "... ", + reverse = Vector.__dict__[self.typeStr + "Reverse"] + self.assertRaises(TypeError, reverse, [2,4,6]) + + # Test the (type* INPLACE_ARRAY1, int DIM1) typemap + def testOnes(self): + "Test ones function" + print >>sys.stderr, self.typeStr, "... ", + ones = Vector.__dict__[self.typeStr + "Ones"] + vector = np.zeros(5,self.typeCode) + ones(vector) + np.testing.assert_array_equal(vector, np.array([1,1,1,1,1])) + + # Test the (type* INPLACE_ARRAY1, int DIM1) typemap + def testOnesWrongDim(self): + "Test ones function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + ones = Vector.__dict__[self.typeStr + "Ones"] + vector = np.zeros((5,5),self.typeCode) + self.assertRaises(TypeError, ones, vector) + + # Test the (type* INPLACE_ARRAY1, int DIM1) typemap + def testOnesWrongType(self): + "Test ones function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + ones = Vector.__dict__[self.typeStr + "Ones"] + vector = np.zeros((5,5),'c') + self.assertRaises(TypeError, ones, vector) + + # Test the (type* INPLACE_ARRAY1, int DIM1) typemap + def testOnesNonArray(self): + "Test ones function with non-array" + print >>sys.stderr, self.typeStr, "... ", + ones = Vector.__dict__[self.typeStr + "Ones"] + self.assertRaises(TypeError, ones, [2,4,6,8]) + + # Test the (int DIM1, type* INPLACE_ARRAY1) typemap + def testZeros(self): + "Test zeros function" + print >>sys.stderr, self.typeStr, "... ", + zeros = Vector.__dict__[self.typeStr + "Zeros"] + vector = np.ones(5,self.typeCode) + zeros(vector) + np.testing.assert_array_equal(vector, np.array([0,0,0,0,0])) + + # Test the (int DIM1, type* INPLACE_ARRAY1) typemap + def testZerosWrongDim(self): + "Test zeros function with wrong dimensions" + print >>sys.stderr, self.typeStr, "... ", + zeros = Vector.__dict__[self.typeStr + "Zeros"] + vector = np.ones((5,5),self.typeCode) + self.assertRaises(TypeError, zeros, vector) + + # Test the (int DIM1, type* INPLACE_ARRAY1) typemap + def testZerosWrongType(self): + "Test zeros function with wrong type" + print >>sys.stderr, self.typeStr, "... ", + zeros = Vector.__dict__[self.typeStr + "Zeros"] + vector = np.ones(6,'c') + self.assertRaises(TypeError, zeros, vector) + + # Test the (int DIM1, type* INPLACE_ARRAY1) typemap + def testZerosNonArray(self): + "Test zeros function with non-array" + print >>sys.stderr, self.typeStr, "... ", + zeros = Vector.__dict__[self.typeStr + "Zeros"] + self.assertRaises(TypeError, zeros, [1,3,5,7,9]) + + # Test the (type ARGOUT_ARRAY1[ANY]) typemap + def testEOSplit(self): + "Test eoSplit function" + print >>sys.stderr, self.typeStr, "... ", + eoSplit = Vector.__dict__[self.typeStr + "EOSplit"] + even, odd = eoSplit([1,2,3]) + self.assertEquals((even == [1,0,3]).all(), True) + self.assertEquals((odd == [0,2,0]).all(), True) + + # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap + def testTwos(self): + "Test twos function" + print >>sys.stderr, self.typeStr, "... ", + twos = Vector.__dict__[self.typeStr + "Twos"] + vector = twos(5) + self.assertEquals((vector == [2,2,2,2,2]).all(), True) + + # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap + def testTwosNonInt(self): + "Test twos function with non-integer dimension" + print >>sys.stderr, self.typeStr, "... 
", + twos = Vector.__dict__[self.typeStr + "Twos"] + self.assertRaises(TypeError, twos, 5.0) + + # Test the (int DIM1, type* ARGOUT_ARRAY1) typemap + def testThrees(self): + "Test threes function" + print >>sys.stderr, self.typeStr, "... ", + threes = Vector.__dict__[self.typeStr + "Threes"] + vector = threes(6) + self.assertEquals((vector == [3,3,3,3,3,3]).all(), True) + + # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap + def testThreesNonInt(self): + "Test threes function with non-integer dimension" + print >>sys.stderr, self.typeStr, "... ", + threes = Vector.__dict__[self.typeStr + "Threes"] + self.assertRaises(TypeError, threes, "threes") + +###################################################################### + +class scharTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "schar" + self.typeCode = "b" + +###################################################################### + +class ucharTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "uchar" + self.typeCode = "B" + +###################################################################### + +class shortTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "short" + self.typeCode = "h" + +###################################################################### + +class ushortTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "ushort" + self.typeCode = "H" + +###################################################################### + +class intTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "int" + self.typeCode = "i" + +###################################################################### + +class uintTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "uint" + self.typeCode = "I" + +###################################################################### + +class longTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "long" + self.typeCode = "l" + +###################################################################### + +class ulongTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "ulong" + self.typeCode = "L" + +###################################################################### + +class longLongTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "longLong" + self.typeCode = "q" + +###################################################################### + +class ulongLongTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "ulongLong" + self.typeCode = "Q" + +###################################################################### + +class floatTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "float" + self.typeCode = "f" + +###################################################################### + +class doubleTestCase(VectorTestCase): + def __init__(self, 
methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + +###################################################################### + +if __name__ == "__main__": + + # Build the test suite + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite( scharTestCase)) + suite.addTest(unittest.makeSuite( ucharTestCase)) + suite.addTest(unittest.makeSuite( shortTestCase)) + suite.addTest(unittest.makeSuite( ushortTestCase)) + suite.addTest(unittest.makeSuite( intTestCase)) + suite.addTest(unittest.makeSuite( uintTestCase)) + suite.addTest(unittest.makeSuite( longTestCase)) + suite.addTest(unittest.makeSuite( ulongTestCase)) + suite.addTest(unittest.makeSuite( longLongTestCase)) + suite.addTest(unittest.makeSuite(ulongLongTestCase)) + suite.addTest(unittest.makeSuite( floatTestCase)) + suite.addTest(unittest.makeSuite( doubleTestCase)) + + # Execute the test suite + print "Testing 1D Functions of Module Vector" + print "NumPy version", np.__version__ + print + result = unittest.TextTestRunner(verbosity=2).run(suite) + sys.exit(len(result.errors) + len(result.failures)) -- cgit v1.2.1
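
As a quick illustration of what the typemaps applied in Matrix.i buy you at the
Python level, the following is a minimal sketch, assuming the SWIG-generated
Matrix extension module from this patch has been compiled and is importable;
the sample values mirror testMatrix.py and are not part of the patch itself::

    import numpy as np
    import Matrix   # assumption: extension built from Matrix.i / Matrix.cxx above

    # (TYPE IN_ARRAY2[ANY][ANY]): accepts any nested sequence convertible
    # to a 2x2 array of the right type
    det = Matrix.doubleDet([[8, 7], [6, 9]])    # -> 30.0

    # (TYPE INPLACE_ARRAY2[ANY][ANY]): requires a contiguous 3x3 array of
    # the matching dtype; the C function modifies it in place
    m = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], 'd')
    Matrix.doubleScale(m, 4)                    # m is now 4 * its old values

    # (TYPE ARGOUT_ARRAY2[ANY][ANY]): the two output arguments become
    # return values on the Python side
    lower, upper = Matrix.doubleLUSplit([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

The 1D Vector and 3D Tensor modules follow the same pattern; only the
dimension-specific typemap names (IN_ARRAY1, IN_ARRAY3, and so on) change.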